Diffstat (limited to 'third_party/libwebrtc/modules/video_coding')
-rw-r--r-- third_party/libwebrtc/modules/video_coding/BUILD.gn 1329
-rw-r--r-- third_party/libwebrtc/modules/video_coding/DEPS 25
-rw-r--r-- third_party/libwebrtc/modules/video_coding/OWNERS 7
-rw-r--r-- third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc 62
-rw-r--r-- third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h 46
-rw-r--r-- third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build 225
-rw-r--r-- third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc 126
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build 205
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn 110
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS 4
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc 118
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h 32
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build 225
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc 171
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc 205
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h 23
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc 825
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h 31
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc 264
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc 368
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS 5
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS 2
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc 166
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc 178
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h 38
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc 657
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h 109
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc 713
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h 125
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc 89
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc 107
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h 72
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h 85
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc 99
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h 28
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc 373
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h 128
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h 147
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc 65
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h 62
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h 80
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h 91
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc 266
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc 277
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h 120
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc 353
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc 319
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc 78
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h 30
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps 1
-rwxr-xr-x third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh 56
-rwxr-xr-x third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh 70
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc 77
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h 100
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h 28
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm 30
-rwxr-xr-x third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py 438
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.cc 186
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.h 65
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer_unittest.cc 141
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc 456
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.cc 325
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.h 53
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl_unittest.cc 259
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc 182
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h 128
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc 155
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc 101
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc 63
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc 860
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h 107
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc 465
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc 267
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc 87
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc 441
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h 95
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc 105
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc 88
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc 722
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h 263
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc 197
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc 884
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h 168
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc 781
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h 63
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h 50
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h 49
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc 384
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h 74
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc 1438
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h 159
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc 112
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc 624
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h 164
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc 788
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h 17
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc 146
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc 913
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc 24
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h 24
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS 3
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h 54
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h 179
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc 403
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h 60
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc 2194
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h 251
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc 240
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h 39
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc 285
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc 2446
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc 118
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc 182
-rw-r--r-- third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h 134
-rw-r--r-- third_party/libwebrtc/modules/video_coding/decoder_database.cc 152
-rw-r--r-- third_party/libwebrtc/modules/video_coding/decoder_database.h 73
-rw-r--r-- third_party/libwebrtc/modules/video_coding/decoder_database_unittest.cc 84
-rw-r--r-- third_party/libwebrtc/modules/video_coding/decoding_state.cc 368
-rw-r--r-- third_party/libwebrtc/modules/video_coding/decoding_state.h 89
-rw-r--r-- third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc 713
-rw-r--r-- third_party/libwebrtc/modules/video_coding/encoded_frame.cc 151
-rw-r--r-- third_party/libwebrtc/modules/video_coding/encoded_frame.h 127
-rw-r--r-- third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build 232
-rw-r--r-- third_party/libwebrtc/modules/video_coding/event_wrapper.cc 41
-rw-r--r-- third_party/libwebrtc/modules/video_coding/event_wrapper.h 47
-rw-r--r-- third_party/libwebrtc/modules/video_coding/fec_controller_default.cc 211
-rw-r--r-- third_party/libwebrtc/modules/video_coding/fec_controller_default.h 68
-rw-r--r-- third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc 114
-rw-r--r-- third_party/libwebrtc/modules/video_coding/fec_rate_table.h 461
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_buffer.cc 265
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_buffer.h 89
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_buffer2.cc 625
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_buffer2.h 193
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc 665
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc 75
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h 49
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build 225
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc 121
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_helpers.cc 96
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_helpers.h 30
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build 232
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_helpers_unittest.cc 34
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_object.cc 131
-rw-r--r-- third_party/libwebrtc/modules/video_coding/frame_object.h 68
-rw-r--r-- third_party/libwebrtc/modules/video_coding/g3doc/index.md 177
-rw-r--r-- third_party/libwebrtc/modules/video_coding/generic_decoder.cc 325
-rw-r--r-- third_party/libwebrtc/modules/video_coding/generic_decoder.h 124
-rw-r--r-- third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc 190
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc 287
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h 56
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc 778
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc 53
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h 38
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc 45
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc 271
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h 76
-rw-r--r-- third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc 368
-rw-r--r-- third_party/libwebrtc/modules/video_coding/histogram.cc 61
-rw-r--r-- third_party/libwebrtc/modules/video_coding/histogram.h 46
-rw-r--r-- third_party/libwebrtc/modules/video_coding/histogram_unittest.cc 77
-rw-r--r-- third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h 45
-rw-r--r-- third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc 20
-rw-r--r-- third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h 121
-rw-r--r-- third_party/libwebrtc/modules/video_coding/include/video_coding.h 150
-rw-r--r-- third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h 121
-rw-r--r-- third_party/libwebrtc/modules/video_coding/include/video_error_codes.h 31
-rw-r--r-- third_party/libwebrtc/modules/video_coding/internal_defines.h 23
-rw-r--r-- third_party/libwebrtc/modules/video_coding/jitter_buffer.cc 892
-rw-r--r-- third_party/libwebrtc/modules/video_coding/jitter_buffer.h 275
-rw-r--r-- third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h 59
-rw-r--r-- third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc 1848
-rw-r--r-- third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc 173
-rw-r--r-- third_party/libwebrtc/modules/video_coding/loss_notification_controller.h 111
-rw-r--r-- third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc 607
-rw-r--r-- third_party/libwebrtc/modules/video_coding/media_opt_util.cc 704
-rw-r--r-- third_party/libwebrtc/modules/video_coding/media_opt_util.h 350
-rw-r--r-- third_party/libwebrtc/modules/video_coding/nack_requester.cc 340
-rw-r--r-- third_party/libwebrtc/modules/video_coding/nack_requester.h 157
-rw-r--r-- third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build 233
-rw-r--r-- third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc 402
-rw-r--r-- third_party/libwebrtc/modules/video_coding/packet.cc 69
-rw-r--r-- third_party/libwebrtc/modules/video_coding/packet.h 80
-rw-r--r-- third_party/libwebrtc/modules/video_coding/packet_buffer.cc 422
-rw-r--r-- third_party/libwebrtc/modules/video_coding/packet_buffer.h 134
-rw-r--r-- third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build 232
-rw-r--r-- third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc 828
-rw-r--r-- third_party/libwebrtc/modules/video_coding/receiver.cc 191
-rw-r--r-- third_party/libwebrtc/modules/video_coding/receiver.h 69
-rw-r--r-- third_party/libwebrtc/modules/video_coding/receiver_unittest.cc 493
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc 33
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h 38
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc 189
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h 60
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc 322
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc 44
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h 32
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc 186
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h 70
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc 254
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h 83
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc 370
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc 367
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h 105
-rw-r--r-- third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc 637
-rw-r--r-- third_party/libwebrtc/modules/video_coding/session_info.cc 540
-rw-r--r-- third_party/libwebrtc/modules/video_coding/session_info.h 122
-rw-r--r-- third_party/libwebrtc/modules/video_coding/session_info_unittest.cc 469
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/BUILD.gn 135
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc 295
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h 35
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc 390
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h 46
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build 225
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc 116
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc 444
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h 190
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc 123
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc 427
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h 138
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc 245
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc 177
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h 64
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc 358
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc 353
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h 145
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc 101
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h 59
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc 395
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build 232
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h 139
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc 88
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h 40
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc 452
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h 69
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build 225
-rw-r--r-- third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc 584
-rw-r--r-- third_party/libwebrtc/modules/video_coding/test/stream_generator.cc 128
-rw-r--r-- third_party/libwebrtc/modules/video_coding/test/stream_generator.h 74
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/BUILD.gn 153
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc 58
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/codec_timer.h 50
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.cc 148
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.h 106
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_unittest.cc 115
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc 71
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h 46
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc 190
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc 476
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h 218
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build 232
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc 305
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc 161
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h 69
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc 105
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.cc 169
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.h 48
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_unittest.cc 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timing.cc 297
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timing.h 160
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build 232
-rw-r--r-- third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc 339
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc 148
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h 93
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc 278
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc 92
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h 52
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc 114
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc 268
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h 94
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc 160
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc 85
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h 47
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc 90
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h 25
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc 238
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h 82
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc 188
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc 245
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h 66
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc 311
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc 53
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/qp_parser.h 45
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc 118
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc 334
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h 120
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc 254
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc 343
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h 70
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc 824
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc 967
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h 95
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc 93
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h 33
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp8_constants.h 27
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc 200
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h 40
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h 198
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc 533
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h 155
-rw-r--r-- third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc 94
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc 352
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc 493
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build 226
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_coding_defines.cc 20
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build 249
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_coding_impl.cc 254
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_coding_impl.h 179
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build 243
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_receiver.cc 278
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_receiver2.cc 108
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_receiver2.h 67
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_receiver2_unittest.cc 145
-rw-r--r-- third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc 236
-rw-r--r-- third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build 235
-rw-r--r-- third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build 221
-rw-r--r-- third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build 237
-rw-r--r-- third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build 238
-rw-r--r-- third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build 233
325 files changed, 74520 insertions, 0 deletions
diff --git a/third_party/libwebrtc/modules/video_coding/BUILD.gn b/third_party/libwebrtc/modules/video_coding/BUILD.gn
new file mode 100644
index 0000000000..e1b5e4ba84
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/BUILD.gn
@@ -0,0 +1,1329 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//third_party/libaom/options.gni")
+import("../../webrtc.gni")
+
+rtc_library("encoded_frame") {
+ visibility = [ "*" ]
+ sources = [
+ "encoded_frame.cc",
+ "encoded_frame.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../modules:module_api_public",
+ "../../modules/rtp_rtcp:rtp_video_header",
+ "../../rtc_base:checks",
+ "../../rtc_base/experiments:alr_experiment",
+ "../../rtc_base/experiments:rtt_mult_experiment",
+ "../../rtc_base/system:rtc_export",
+ "../../system_wrappers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("chain_diff_calculator") {
+ sources = [
+ "chain_diff_calculator.cc",
+ "chain_diff_calculator.h",
+ ]
+
+ deps = [
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("frame_dependencies_calculator") {
+ sources = [
+ "frame_dependencies_calculator.cc",
+ "frame_dependencies_calculator.h",
+ ]
+
+ deps = [
+ "../../api:array_view",
+ "../../common_video/generic_frame_descriptor",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("nack_requester") {
+ sources = [
+ "histogram.cc",
+ "histogram.h",
+ "nack_requester.cc",
+ "nack_requester.h",
+ ]
+
+ deps = [
+ "..:module_api",
+ "../../api:field_trials_view",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/task_queue:pending_task_safety_flag",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers",
+ ]
+}
+
+rtc_library("packet_buffer") {
+ sources = [
+ "packet_buffer.cc",
+ "packet_buffer.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame_type",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:mod_ops",
+ "../../rtc_base:rtc_numerics",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("h264_packet_buffer") {
+ sources = [
+ "h264_packet_buffer.cc",
+ "h264_packet_buffer.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":packet_buffer",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame_type",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_numerics",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("frame_helpers") {
+ sources = [
+ "frame_helpers.cc",
+ "frame_helpers.h",
+ ]
+ deps = [
+ "../../api/video:encoded_frame",
+ "../../rtc_base:logging",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("frame_buffer2") {
+ sources = [
+ "frame_buffer2.cc",
+ "frame_buffer2.h",
+ ]
+ deps = [
+ ":frame_helpers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api:field_trials_view",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/units:data_size",
+ "../../api/units:time_delta",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_rtp_headers",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:rtt_mult_experiment",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers",
+ "timing:inter_frame_delay",
+ "timing:jitter_estimator",
+ "timing:timing_module",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("video_coding") {
+ visibility = [ "*" ]
+ sources = [
+ "decoder_database.cc",
+ "decoder_database.h",
+ "fec_controller_default.cc",
+ "fec_controller_default.h",
+ "fec_rate_table.h",
+ "frame_object.cc",
+ "frame_object.h",
+ "generic_decoder.cc",
+ "generic_decoder.h",
+ "h264_sprop_parameter_sets.cc",
+ "h264_sprop_parameter_sets.h",
+ "h264_sps_pps_tracker.cc",
+ "h264_sps_pps_tracker.h",
+ "include/video_codec_initializer.h",
+ "internal_defines.h",
+ "loss_notification_controller.cc",
+ "loss_notification_controller.h",
+ "media_opt_util.cc",
+ "media_opt_util.h",
+ "rtp_frame_id_only_ref_finder.cc",
+ "rtp_frame_id_only_ref_finder.h",
+ "rtp_frame_reference_finder.cc",
+ "rtp_frame_reference_finder.h",
+ "rtp_generic_ref_finder.cc",
+ "rtp_generic_ref_finder.h",
+ "rtp_seq_num_only_ref_finder.cc",
+ "rtp_seq_num_only_ref_finder.h",
+ "rtp_vp8_ref_finder.cc",
+ "rtp_vp8_ref_finder.h",
+ "rtp_vp9_ref_finder.cc",
+ "rtp_vp9_ref_finder.h",
+ "video_codec_initializer.cc",
+ "video_receiver2.cc",
+ "video_receiver2.h",
+ ]
+ if (build_with_mozilla) {
+ sources += [
+ "event_wrapper.cc",
+ "event_wrapper.h",
+ ]
+ }
+
+ deps = [
+ ":codec_globals_headers",
+ ":encoded_frame",
+ ":frame_helpers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ ":webrtc_vp8_scalability",
+ ":webrtc_vp9_helpers",
+ "..:module_api",
+ "..:module_api_public",
+ "..:module_fec_api",
+ "../../api:array_view",
+ "../../api:fec_controller_api",
+ "../../api:field_trials_view",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/task_queue",
+ "../../api/units:data_rate",
+ "../../api/units:data_size",
+ "../../api/units:frequency",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:builtin_video_bitrate_allocator_factory",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_bitrate_allocator_factory",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:copy_on_write_buffer",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:threading",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:alr_experiment",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:min_video_bitrate_experiment",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../rtc_base/experiments:rtt_mult_experiment",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../rtc_base/third_party/base64",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "../../video/config:encoder_config",
+ "../rtp_rtcp",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ "codecs/av1:av1_svc_config",
+ "svc:scalability_mode_util",
+ "timing:inter_frame_delay",
+ "timing:jitter_estimator",
+ "timing:rtt_filter",
+ "timing:timing_module",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("video_codec_interface") {
+ visibility = [ "*" ]
+ sources = [
+ "include/video_codec_interface.cc",
+ "include/video_codec_interface.h",
+ "include/video_coding_defines.h",
+ "include/video_error_codes.h",
+ "video_coding_defines.cc",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../common_video/generic_frame_descriptor",
+ "../../rtc_base/system:rtc_export",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("video_coding_legacy") {
+ visibility = [ ":video_coding_unittests" ]
+ sources = [
+ "decoding_state.cc",
+ "decoding_state.h",
+ "event_wrapper.cc",
+ "event_wrapper.h",
+ "frame_buffer.cc",
+ "frame_buffer.h",
+ "include/video_coding.h",
+ "jitter_buffer.cc",
+ "jitter_buffer.h",
+ "jitter_buffer_common.h",
+ "packet.cc",
+ "packet.h",
+ "receiver.cc",
+ "receiver.h",
+ "session_info.cc",
+ "session_info.h",
+ "video_coding_impl.cc",
+ "video_coding_impl.h",
+ "video_receiver.cc",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":encoded_frame",
+ ":video_codec_interface",
+ ":video_coding",
+ "..:module_api",
+ "..:module_api_public",
+ "../../api:field_trials_view",
+ "../../api:rtp_headers",
+ "../../api:rtp_packet_info",
+ "../../api:sequence_checker",
+ "../../api/transport:field_trial_based_config",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../modules/rtp_rtcp:rtp_video_header",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:one_time_event",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base/memory:always_valid_pointer",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ "timing:inter_frame_delay",
+ "timing:jitter_estimator",
+ "timing:timing_module",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_source_set("codec_globals_headers") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/h264/include/h264_globals.h",
+ "codecs/interface/common_constants.h",
+ "codecs/vp8/include/vp8_globals.h",
+ "codecs/vp9/include/vp9_globals.h",
+ ]
+
+ deps = [ "../../rtc_base:checks" ]
+}
+
+rtc_library("video_coding_utility") {
+ visibility = [ "*" ]
+ sources = [
+ "utility/bandwidth_quality_scaler.cc",
+ "utility/bandwidth_quality_scaler.h",
+ "utility/decoded_frames_history.cc",
+ "utility/decoded_frames_history.h",
+ "utility/frame_dropper.cc",
+ "utility/frame_dropper.h",
+ "utility/framerate_controller_deprecated.cc",
+ "utility/framerate_controller_deprecated.h",
+ "utility/ivf_defines.h",
+ "utility/ivf_file_reader.cc",
+ "utility/ivf_file_reader.h",
+ "utility/ivf_file_writer.cc",
+ "utility/ivf_file_writer.h",
+ "utility/qp_parser.cc",
+ "utility/qp_parser.h",
+ "utility/quality_scaler.cc",
+ "utility/quality_scaler.h",
+ "utility/simulcast_rate_allocator.cc",
+ "utility/simulcast_rate_allocator.h",
+ "utility/simulcast_utility.cc",
+ "utility/simulcast_utility.h",
+ "utility/vp8_constants.h",
+ "utility/vp8_header_parser.cc",
+ "utility/vp8_header_parser.h",
+ "utility/vp9_constants.h",
+ "utility/vp9_uncompressed_header_parser.cc",
+ "utility/vp9_uncompressed_header_parser.h",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ "../../api:array_view",
+ "../../api:field_trials_view",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/units:time_delta",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_codec_constants",
+ "../../api/video:video_frame",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../modules/rtp_rtcp",
+ "../../rtc_base:bitstream_reader",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base:weak_ptr",
+ "../../rtc_base/experiments:bandwidth_quality_scaler_settings",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/experiments:quality_scaler_settings",
+ "../../rtc_base/experiments:quality_scaling_experiment",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../rtc_base/experiments:stable_target_rate_experiment",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:arch",
+ "../../rtc_base/system:file_wrapper",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers:field_trial",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/numeric:bits",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("webrtc_h264") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/h264/h264.cc",
+ "codecs/h264/h264_color_space.cc",
+ "codecs/h264/h264_color_space.h",
+ "codecs/h264/h264_decoder_impl.cc",
+ "codecs/h264/h264_decoder_impl.h",
+ "codecs/h264/h264_encoder_impl.cc",
+ "codecs/h264/h264_encoder_impl.h",
+ "codecs/h264/include/h264.h",
+ ]
+
+ defines = []
+ deps = [
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/video:video_codec_constants",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_i010",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:codec",
+ "../../media:media_constants",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/system:rtc_export",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "svc:scalability_structures",
+ "svc:scalable_video_controller",
+ "//third_party/libyuv",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (rtc_use_h264) {
+ deps += [
+ "//third_party/ffmpeg",
+ "//third_party/openh264:encoder",
+ ]
+ if (!build_with_mozilla) {
+ deps += [ "../../media:rtc_media_base" ]
+ }
+ }
+}
+
+rtc_library("webrtc_multiplex") {
+ sources = [
+ "codecs/multiplex/augmented_video_frame_buffer.cc",
+ "codecs/multiplex/include/augmented_video_frame_buffer.h",
+ "codecs/multiplex/include/multiplex_decoder_adapter.h",
+ "codecs/multiplex/include/multiplex_encoder_adapter.h",
+ "codecs/multiplex/multiplex_decoder_adapter.cc",
+ "codecs/multiplex/multiplex_encoded_image_packer.cc",
+ "codecs/multiplex/multiplex_encoded_image_packer.h",
+ "codecs/multiplex/multiplex_encoder_adapter.cc",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api:fec_controller_api",
+ "../../api:scoped_refptr",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base/synchronization:mutex",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+}
+
+# This target defines a bare-bones interface towards libvpx, used by the
+# VP8 and VP9 wrappers below.
+rtc_library("webrtc_libvpx_interface") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/interface/libvpx_interface.cc",
+ "codecs/interface/libvpx_interface.h",
+ ]
+ deps = [ "../../rtc_base:checks" ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+}
+
+rtc_library("mock_libvpx_interface") {
+ testonly = true
+ sources = [ "codecs/interface/mock_libvpx_interface.h" ]
+ deps = [
+ ":webrtc_libvpx_interface",
+ "../../test:test_support",
+ ]
+}
+
+# This target includes the internal SW codec.
+rtc_library("webrtc_vp8") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ sources = [
+ "codecs/vp8/include/vp8.h",
+ "codecs/vp8/libvpx_vp8_decoder.cc",
+ "codecs/vp8/libvpx_vp8_decoder.h",
+ "codecs/vp8/libvpx_vp8_encoder.cc",
+ "codecs/vp8/libvpx_vp8_encoder.h",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ ":webrtc_libvpx_interface",
+ ":webrtc_vp8_scalability",
+ ":webrtc_vp8_temporal_layers",
+ "../../api:fec_controller_api",
+ "../../api:scoped_refptr",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../api/video_codecs:vp8_temporal_layers_factory",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:cpu_speed_experiment",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "svc:scalability_mode_util",
+ "//third_party/libyuv",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+}
+
+rtc_source_set("webrtc_vp8_scalability") {
+ sources = [
+ "codecs/vp8/vp8_scalability.cc",
+ "codecs/vp8/vp8_scalability.h",
+ ]
+ deps = [ "../../api/video_codecs:scalability_mode" ]
+}
+
+rtc_library("webrtc_vp8_temporal_layers") {
+ visibility = [ "*" ]
+ sources = [
+ "codecs/vp8/default_temporal_layers.cc",
+ "codecs/vp8/default_temporal_layers.h",
+ "codecs/vp8/include/temporal_layers_checker.h",
+ "codecs/vp8/screenshare_layers.cc",
+ "codecs/vp8/screenshare_layers.h",
+ "codecs/vp8/temporal_layers.h",
+ "codecs/vp8/temporal_layers_checker.cc",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_coding_utility",
+ "../../api:fec_controller_api",
+ "../../api/video_codecs:video_codecs_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rate_statistics",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+# This target includes VP9 files that may be used for any VP9 codec, internal SW or external HW.
+rtc_library("webrtc_vp9_helpers") {
+ sources = [
+ "codecs/vp9/svc_config.cc",
+ "codecs/vp9/svc_config.h",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_codec_constants",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base/experiments:stable_target_rate_experiment",
+ "svc:scalability_mode_util",
+ "svc:scalability_structures",
+ "svc:scalable_video_controller",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("webrtc_vp9") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ sources = [
+ "codecs/vp9/include/vp9.h",
+ "codecs/vp9/libvpx_vp9_decoder.cc",
+ "codecs/vp9/libvpx_vp9_decoder.h",
+ "codecs/vp9/libvpx_vp9_encoder.cc",
+ "codecs/vp9/libvpx_vp9_encoder.h",
+ "codecs/vp9/vp9.cc",
+ "codecs/vp9/vp9_frame_buffer_pool.cc",
+ "codecs/vp9/vp9_frame_buffer_pool.h",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ ":video_coding_utility",
+ ":webrtc_libvpx_interface",
+ ":webrtc_vp9_helpers",
+ "../../api:fec_controller_api",
+ "../../api:field_trials_view",
+ "../../api:refcountedbase",
+ "../../api:scoped_refptr",
+ "../../api/transport:field_trial_based_config",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_i010",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../media:codec",
+ "../../media:rtc_media_base",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/containers:flat_map",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:rate_control_settings",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers:field_trial",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "svc:scalability_mode_util",
+ "svc:scalability_structures",
+ "svc:scalable_video_controller",
+ "svc:svc_rate_allocator",
+ "//third_party/libyuv",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+}
+
+if (rtc_include_tests) {
+ if (is_android) {
+ rtc_library("android_codec_factory_helper") {
+ sources = [
+ "codecs/test/android_codec_factory_helper.cc",
+ "codecs/test/android_codec_factory_helper.h",
+ ]
+
+ deps = [
+ "../../api/video_codecs:video_codecs_api",
+ "../../modules/utility:utility",
+ "../../rtc_base:checks",
+ "../../rtc_base:ignore_wundef",
+ "../../sdk/android:internal_jni",
+ "../../sdk/android:native_api_base",
+ "../../sdk/android:native_api_codecs",
+ "../../sdk/android:native_api_jni",
+ ]
+ }
+ }
+
+ if (is_ios || is_mac) {
+ rtc_library("objc_codec_factory_helper") {
+ sources = [
+ "codecs/test/objc_codec_factory_helper.h",
+ "codecs/test/objc_codec_factory_helper.mm",
+ ]
+
+ deps = [
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../media:rtc_audio_video",
+ "../../media:rtc_media_base",
+ "../../sdk:native_api",
+ "../../sdk:peerconnectionfactory_base_objc",
+ "../../sdk:videocodec_objc",
+ "../../sdk:videosource_objc",
+ "../../sdk:videotoolbox_objc",
+ ]
+ }
+ }
+
+ rtc_library("encoded_video_frame_producer") {
+ testonly = true
+ sources = [
+ "codecs/test/encoded_video_frame_producer.cc",
+ "codecs/test/encoded_video_frame_producer.h",
+ ]
+ deps = [
+ ":video_codec_interface",
+ "../../api:create_frame_generator",
+ "../../api:frame_generator_api",
+ "../../api/transport/rtp:dependency_descriptor",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video_codecs:video_codecs_api",
+ "../../rtc_base:checks",
+ ]
+ }
+
+ rtc_library("simulcast_test_fixture_impl") {
+ testonly = true
+ sources = [
+ "utility/simulcast_test_fixture_impl.cc",
+ "utility/simulcast_test_fixture_impl.h",
+ ]
+
+ deps = [
+ ":video_codec_interface",
+ ":video_coding",
+ ":video_coding_utility",
+ "../../api:mock_video_decoder",
+ "../../api:mock_video_encoder",
+ "../../api:simulcast_test_fixture_api",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../rtc_base:checks",
+ "../../test:test_support",
+ ]
+ }
+
+ rtc_library("video_codecs_test_framework") {
+ testonly = true
+ sources = [
+ "codecs/test/video_codec_analyzer.cc",
+ "codecs/test/video_codec_analyzer.h",
+ "codecs/test/video_codec_unittest.cc",
+ "codecs/test/video_codec_unittest.h",
+ "codecs/test/videoprocessor.cc",
+ "codecs/test/videoprocessor.h",
+ ]
+
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_coding",
+ ":video_coding_utility",
+ ":videocodec_test_stats_impl",
+ ":webrtc_vp9_helpers",
+ "..:module_api",
+ "../../api:create_frame_generator",
+ "../../api:frame_generator_api",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api:video_codec_tester_api",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/task_queue",
+ "../../api/task_queue:default_task_queue_factory",
+ "../../api/video:builtin_video_bitrate_allocator_factory",
+ "../../api/video:encoded_image",
+ "../../api/video:resolution",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_bitrate_allocator_factory",
+ "../../api/video:video_codec_constants",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../rtc_base:buffer",
+ "../../rtc_base:checks",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "//third_party/libyuv",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ }
+
+ video_coding_modules_tests_resources = []
+ if (is_android) {
+ video_coding_modules_tests_resources += [
+ "../../resources/foreman_128x96.yuv",
+ "../../resources/foreman_160x120.yuv",
+ "../../resources/foreman_176x144.yuv",
+ "../../resources/foreman_240x136.yuv",
+ "../../resources/foreman_320x240.yuv",
+ "../../resources/foreman_480x272.yuv",
+ ]
+ }
+ if (!is_android) {
+ video_coding_modules_tests_resources += [
+ "../../resources/ConferenceMotion_1280_720_50.yuv",
+ "../../resources/FourPeople_1280x720_30.yuv",
+ ]
+ }
+
+ num_video_coding_modules_tests_resources = 0
+ foreach(i, video_coding_modules_tests_resources) {
+ num_video_coding_modules_tests_resources += 1
+ }
+
+ if (num_video_coding_modules_tests_resources > 0) {
+ if (is_ios || is_mac) {
+ bundle_data("video_coding_modules_tests_resources_bundle_data") {
+ testonly = true
+ sources = video_coding_modules_tests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+ }
+
+ rtc_library("videocodec_test_impl") {
+ testonly = true
+ sources = [
+ "codecs/test/video_codec_tester_impl.cc",
+ "codecs/test/video_codec_tester_impl.h",
+ "codecs/test/videocodec_test_fixture_impl.cc",
+ "codecs/test/videocodec_test_fixture_impl.h",
+ ]
+ deps = [
+ ":codec_globals_headers",
+ ":video_codec_interface",
+ ":video_codecs_test_framework",
+ ":video_coding_utility",
+ ":videocodec_test_stats_impl",
+ ":webrtc_vp9_helpers",
+ "../../api:array_view",
+ "../../api:video_codec_tester_api",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/task_queue:default_task_queue_factory",
+ "../../api/task_queue:task_queue",
+ "../../api/test/metrics:global_metrics_logger_and_exporter",
+ "../../api/test/metrics:metric",
+ "../../api/test/video:function_video_factory",
+ "../../api/transport:field_trial_based_config",
+ "../../api/units:frequency",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_frame",
+ "../../api/video_codecs:video_codecs_api",
+ "../../api/video_codecs:video_decoder_factory_template",
+ "../../api/video_codecs:video_decoder_factory_template_dav1d_adapter",
+ "../../api/video_codecs:video_decoder_factory_template_libvpx_vp8_adapter",
+ "../../api/video_codecs:video_decoder_factory_template_libvpx_vp9_adapter",
+ "../../api/video_codecs:video_decoder_factory_template_open_h264_adapter",
+ "../../api/video_codecs:video_encoder_factory_template",
+ "../../api/video_codecs:video_encoder_factory_template_libaom_av1_adapter",
+ "../../api/video_codecs:video_encoder_factory_template_libvpx_vp8_adapter",
+ "../../api/video_codecs:video_encoder_factory_template_libvpx_vp9_adapter",
+ "../../api/video_codecs:video_encoder_factory_template_open_h264_adapter",
+ "../../call:video_stream_api",
+ "../../common_video",
+ "../../media:media_constants",
+ "../../media:rtc_audio_video",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ "../../video/config:encoder_config",
+ "../../video/config:streams_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ rtc_library("videocodec_test_stats_impl") {
+ testonly = true
+ sources = [
+ "codecs/test/videocodec_test_stats_impl.cc",
+ "codecs/test/videocodec_test_stats_impl.h",
+ ]
+ deps = [
+ "../../api:videocodec_test_stats_api",
+ "../../api/numerics",
+ "../../rtc_base:checks",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../test:test_common",
+ "../rtp_rtcp:rtp_rtcp_format",
+ ]
+ }
+
+ rtc_library("video_coding_modules_tests") {
+ testonly = true
+ defines = []
+
+ sources = [
+ "codecs/h264/test/h264_impl_unittest.cc",
+ "codecs/multiplex/test/multiplex_adapter_unittest.cc",
+ "codecs/test/video_codec_test.cc",
+ "codecs/test/video_encoder_decoder_instantiation_tests.cc",
+ "codecs/test/videocodec_test_av1.cc",
+ "codecs/test/videocodec_test_libvpx.cc",
+ "codecs/vp8/test/vp8_impl_unittest.cc",
+ ]
+
+ if (rtc_libvpx_build_vp9) {
+ sources += [ "codecs/vp9/test/vp9_impl_unittest.cc" ]
+ }
+
+ if (rtc_use_h264) {
+ sources += [ "codecs/test/videocodec_test_openh264.cc" ]
+ }
+
+ deps = [
+ ":encoded_video_frame_producer",
+ ":mock_libvpx_interface",
+ ":video_codec_interface",
+ ":video_codecs_test_framework",
+ ":video_coding_utility",
+ ":videocodec_test_impl",
+ ":webrtc_h264",
+ ":webrtc_libvpx_interface",
+ ":webrtc_multiplex",
+ ":webrtc_vp8",
+ ":webrtc_vp9",
+ ":webrtc_vp9_helpers",
+ "../../api:create_frame_generator",
+ "../../api:create_video_codec_tester_api",
+ "../../api:create_videocodec_test_fixture_api",
+ "../../api:frame_generator_api",
+ "../../api:mock_video_codec_factory",
+ "../../api:mock_video_decoder",
+ "../../api:mock_video_encoder",
+ "../../api:scoped_refptr",
+ "../../api:video_codec_tester_api",
+ "../../api:videocodec_test_fixture_api",
+ "../../api:videocodec_test_stats_api",
+ "../../api/test/video:function_video_factory",
+ "../../api/units:data_rate",
+ "../../api/units:frequency",
+ "../../api/video:encoded_image",
+ "../../api/video:resolution",
+ "../../api/video:video_frame",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:builtin_video_decoder_factory",
+ "../../api/video_codecs:builtin_video_encoder_factory",
+ "../../api/video_codecs:rtc_software_fallback_wrappers",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../common_video",
+ "../../common_video/test:utilities",
+ "../../media:codec",
+ "../../media:media_constants",
+ "../../media:rtc_internal_video_codecs",
+ "../../media:rtc_media_base",
+ "../../media:rtc_simulcast_encoder_adapter",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../test:explicit_key_value_config",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "codecs/av1:dav1d_decoder",
+ "svc:scalability_mode_util",
+ "//third_party/libyuv",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ data = video_coding_modules_tests_resources
+
+ if (is_android) {
+ sources += [ "codecs/test/videocodec_test_mediacodec.cc" ]
+
+ deps += [
+ ":android_codec_factory_helper",
+ "../../rtc_base:stringutils",
+ ]
+ }
+
+ if (is_ios || is_mac) {
+ sources += [ "codecs/test/videocodec_test_videotoolbox.cc" ]
+
+ deps += [ ":objc_codec_factory_helper" ]
+
+ if (num_video_coding_modules_tests_resources > 0) {
+ deps += [ ":video_coding_modules_tests_resources_bundle_data" ]
+ }
+ }
+
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+ }
+
+ rtc_library("video_coding_unittests") {
+ testonly = true
+
+ sources = [
+ "chain_diff_calculator_unittest.cc",
+ "codecs/test/video_codec_analyzer_unittest.cc",
+ "codecs/test/video_codec_tester_impl_unittest.cc",
+ "codecs/test/videocodec_test_fixture_config_unittest.cc",
+ "codecs/test/videocodec_test_stats_impl_unittest.cc",
+ "codecs/test/videoprocessor_unittest.cc",
+ "codecs/vp8/default_temporal_layers_unittest.cc",
+ "codecs/vp8/libvpx_vp8_simulcast_test.cc",
+ "codecs/vp8/screenshare_layers_unittest.cc",
+ "codecs/vp9/svc_config_unittest.cc",
+ "decoder_database_unittest.cc",
+ "decoding_state_unittest.cc",
+ "fec_controller_unittest.cc",
+ "frame_buffer2_unittest.cc",
+ "frame_dependencies_calculator_unittest.cc",
+ "frame_helpers_unittest.cc",
+ "generic_decoder_unittest.cc",
+ "h264_packet_buffer_unittest.cc",
+ "h264_sprop_parameter_sets_unittest.cc",
+ "h264_sps_pps_tracker_unittest.cc",
+ "histogram_unittest.cc",
+ "jitter_buffer_unittest.cc",
+ "loss_notification_controller_unittest.cc",
+ "nack_requester_unittest.cc",
+ "packet_buffer_unittest.cc",
+ "receiver_unittest.cc",
+ "rtp_frame_reference_finder_unittest.cc",
+ "rtp_vp8_ref_finder_unittest.cc",
+ "rtp_vp9_ref_finder_unittest.cc",
+ "session_info_unittest.cc",
+ "test/stream_generator.cc",
+ "test/stream_generator.h",
+ "utility/bandwidth_quality_scaler_unittest.cc",
+ "utility/decoded_frames_history_unittest.cc",
+ "utility/frame_dropper_unittest.cc",
+ "utility/framerate_controller_deprecated_unittest.cc",
+ "utility/ivf_file_reader_unittest.cc",
+ "utility/ivf_file_writer_unittest.cc",
+ "utility/qp_parser_unittest.cc",
+ "utility/quality_scaler_unittest.cc",
+ "utility/simulcast_rate_allocator_unittest.cc",
+ "utility/vp9_uncompressed_header_parser_unittest.cc",
+ "video_codec_initializer_unittest.cc",
+ "video_receiver2_unittest.cc",
+ "video_receiver_unittest.cc",
+ ]
+ if (rtc_use_h264) {
+ sources += [
+ "codecs/h264/h264_encoder_impl_unittest.cc",
+ "codecs/h264/h264_simulcast_unittest.cc",
+ ]
+ }
+
+ deps = [
+ ":chain_diff_calculator",
+ ":codec_globals_headers",
+ ":encoded_frame",
+ ":frame_buffer2",
+ ":frame_dependencies_calculator",
+ ":frame_helpers",
+ ":h264_packet_buffer",
+ ":nack_requester",
+ ":packet_buffer",
+ ":simulcast_test_fixture_impl",
+ ":video_codec_interface",
+ ":video_codecs_test_framework",
+ ":video_coding",
+ ":video_coding_legacy",
+ ":video_coding_utility",
+ ":videocodec_test_impl",
+ ":videocodec_test_stats_impl",
+ ":webrtc_h264",
+ ":webrtc_vp8",
+ ":webrtc_vp8_temporal_layers",
+ ":webrtc_vp9",
+ ":webrtc_vp9_helpers",
+ "..:module_fec_api",
+ "../../api:array_view",
+ "../../api:create_simulcast_test_fixture_api",
+ "../../api:fec_controller_api",
+ "../../api:mock_fec_controller_override",
+ "../../api:mock_video_decoder",
+ "../../api:mock_video_encoder",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api:simulcast_test_fixture_api",
+ "../../api:video_codec_tester_api",
+ "../../api:videocodec_test_fixture_api",
+ "../../api/task_queue",
+ "../../api/task_queue:default_task_queue_factory",
+ "../../api/task_queue/test:mock_task_queue_base",
+ "../../api/test/video:function_video_factory",
+ "../../api/units:data_size",
+ "../../api/units:frequency",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:builtin_video_bitrate_allocator_factory",
+ "../../api/video:encoded_frame",
+ "../../api/video:encoded_image",
+ "../../api/video:render_resolution",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_bitrate_allocation",
+ "../../api/video:video_bitrate_allocator",
+ "../../api/video:video_bitrate_allocator_factory",
+ "../../api/video:video_frame",
+ "../../api/video:video_frame_type",
+ "../../api/video:video_rtp_headers",
+ "../../api/video_codecs:video_codecs_api",
+ "../../api/video_codecs:vp8_temporal_layers_factory",
+ "../../common_video",
+ "../../common_video/generic_frame_descriptor",
+ "../../common_video/test:utilities",
+ "../../media:media_constants",
+ "../../media:rtc_media_base",
+ "../../rtc_base:checks",
+ "../../rtc_base:gunit_helpers",
+ "../../rtc_base:histogram_percentile_counter",
+ "../../rtc_base:platform_thread",
+ "../../rtc_base:random",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:encoder_info_settings",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:unused",
+ "../../system_wrappers",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:metrics",
+ "../../test:fake_encoded_frame",
+ "../../test:fake_video_codecs",
+ "../../test:field_trial",
+ "../../test:fileutils",
+ "../../test:run_loop",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ "../../test:video_test_common",
+ "../../test:video_test_support",
+ "../../test/time_controller:time_controller",
+ "../../third_party/libyuv:libyuv",
+ "../rtp_rtcp:rtp_rtcp_format",
+ "../rtp_rtcp:rtp_video_header",
+ "codecs/av1:video_coding_codecs_av1_tests",
+ "svc:scalability_structure_tests",
+ "svc:svc_rate_allocator_tests",
+ "timing:jitter_estimator",
+ "timing:timing_module",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+ if (rtc_build_libvpx) {
+ deps += [ rtc_libvpx_dir ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_coding/DEPS b/third_party/libwebrtc/modules/video_coding/DEPS
new file mode 100644
index 0000000000..d62707c2f9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/DEPS
@@ -0,0 +1,25 @@
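+# Rules for the checkdeps dependency checker: a "+" prefix permits includes
+# from the listed path.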
+include_rules = [
+ "+vpx",
+ "+call",
+ "+common_video",
+ "+sdk",
+ "+system_wrappers",
+ "+rtc_tools",
+ "+third_party/libyuv",
+ "+rtc_base/system/rtc_export.h",
+ "+video/config",
+]
+
+specific_include_rules = {
+ "android_codec_factory_helper\.cc": [
+ "+base/android",
+ ],
+ "multiplex_encoder_adapter\.cc": [
+ "+media/base",
+ ],
+ ".*test.*\.cc": [
+ "+media/base",
+ "+media/engine",
+ "+video/config",
+ ],
+}
diff --git a/third_party/libwebrtc/modules/video_coding/OWNERS b/third_party/libwebrtc/modules/video_coding/OWNERS
new file mode 100644
index 0000000000..2e4d968c98
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/OWNERS
@@ -0,0 +1,7 @@
+asapersson@webrtc.org
+brandtr@webrtc.org
+ilnik@webrtc.org
+marpan@webrtc.org
+philipel@webrtc.org
+sprang@webrtc.org
+stefan@webrtc.org
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc
new file mode 100644
index 0000000000..5f852717b5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/chain_diff_calculator.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+void ChainDiffCalculator::Reset(const std::vector<bool>& chains) {
+ last_frame_in_chain_.resize(chains.size());
+ for (size_t i = 0; i < chains.size(); ++i) {
+ if (chains[i]) {
+ last_frame_in_chain_[i] = absl::nullopt;
+ }
+ }
+}
+
+absl::InlinedVector<int, 4> ChainDiffCalculator::ChainDiffs(
+ int64_t frame_id) const {
+ absl::InlinedVector<int, 4> result;
+ result.reserve(last_frame_in_chain_.size());
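+ // A chain with no recorded previous frame yields a diff of 0 (chain restart).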
+ for (const auto& frame_id_in_chain : last_frame_in_chain_) {
+ result.push_back(frame_id_in_chain ? (frame_id - *frame_id_in_chain) : 0);
+ }
+ return result;
+}
+
+absl::InlinedVector<int, 4> ChainDiffCalculator::From(
+ int64_t frame_id,
+ const std::vector<bool>& chains) {
+ auto result = ChainDiffs(frame_id);
+ if (chains.size() != last_frame_in_chain_.size()) {
+ RTC_LOG(LS_ERROR) << "Inconsistent chain configuration for frame#"
+ << frame_id << ": expected "
+ << last_frame_in_chain_.size() << " chains, found "
+ << chains.size();
+ }
+ size_t num_chains = std::min(last_frame_in_chain_.size(), chains.size());
+ for (size_t i = 0; i < num_chains; ++i) {
+ if (chains[i]) {
+ last_frame_in_chain_[i] = frame_id;
+ }
+ }
+ return result;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h
new file mode 100644
index 0000000000..bca7340c6f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CHAIN_DIFF_CALCULATOR_H_
+#define MODULES_VIDEO_CODING_CHAIN_DIFF_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// This class is thread compatible.
+class ChainDiffCalculator {
+ public:
+ ChainDiffCalculator() = default;
+ ChainDiffCalculator(const ChainDiffCalculator&) = default;
+ ChainDiffCalculator& operator=(const ChainDiffCalculator&) = default;
+
+ // Restarts chains, i.e. for each position where chains[i] == true the next
+ // chain_diff will be 0. Saves chains.size() as the number of chains in the
+ // stream.
+ void Reset(const std::vector<bool>& chains);
+
+ // Returns the chain diffs for `frame_id` and updates the chains this frame
+ // is part of, as indicated by `chains`.
+ absl::InlinedVector<int, 4> From(int64_t frame_id,
+ const std::vector<bool>& chains);
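+ //
+ // A minimal usage sketch (illustrative only, mirroring the unit tests):
+ //   ChainDiffCalculator calc;
+ //   calc.Reset({true, true});                  // Key frame starts both chains.
+ //   calc.From(/*frame_id=*/1, {true, true});   // Returns {0, 0}.
+ //   calc.From(/*frame_id=*/2, {false, true});  // Returns {1, 1}.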
+
+ private:
+ absl::InlinedVector<int, 4> ChainDiffs(int64_t frame_id) const;
+
+ absl::InlinedVector<absl::optional<int64_t>, 4> last_frame_in_chain_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CHAIN_DIFF_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build
new file mode 100644
index 0000000000..9020f11067
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/chain_diff_calculator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("chain_diff_calculator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc
new file mode 100644
index 0000000000..efd09bd888
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/chain_diff_calculator_unittest.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/chain_diff_calculator.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::SizeIs;
+
+TEST(ChainDiffCalculatorTest, SingleChain) {
+ // Simulate a stream with 2 temporal layers where the chain protects
+ // temporal layer 0.
+ ChainDiffCalculator calculator;
+ // Key frame.
+ calculator.Reset({true});
+ EXPECT_THAT(calculator.From(1, {true}), ElementsAre(0));
+ // T1 delta frame.
+ EXPECT_THAT(calculator.From(2, {false}), ElementsAre(1));
+ // T0 delta frame.
+ EXPECT_THAT(calculator.From(3, {true}), ElementsAre(2));
+}
+
+TEST(ChainDiffCalculatorTest, TwoChainsFullSvc) {
+ // Simulate a full SVC stream with 2 spatial and 2 temporal layers.
+ // Chains protect temporal layer 0.
+ ChainDiffCalculator calculator;
+ // S0 Key frame.
+ calculator.Reset({true, true});
+ EXPECT_THAT(calculator.From(1, {true, true}), ElementsAre(0, 0));
+ // S1 Key frame.
+ EXPECT_THAT(calculator.From(2, {false, true}), ElementsAre(1, 1));
+ // S0T1 delta frame.
+ EXPECT_THAT(calculator.From(3, {false, false}), ElementsAre(2, 1));
+ // S1T1 delta frame.
+ EXPECT_THAT(calculator.From(4, {false, false}), ElementsAre(3, 2));
+ // S0T0 delta frame.
+ EXPECT_THAT(calculator.From(5, {true, true}), ElementsAre(4, 3));
+ // S1T0 delta frame.
+ EXPECT_THAT(calculator.From(6, {false, true}), ElementsAre(1, 1));
+}
+
+TEST(ChainDiffCalculatorTest, TwoChainsKSvc) {
+ // Simulate a K-SVC stream with 2 spatial and 2 temporal layers.
+ // Chains protect temporal layer 0.
+ ChainDiffCalculator calculator;
+ // S0 Key frame.
+ calculator.Reset({true, true});
+ EXPECT_THAT(calculator.From(1, {true, true}), ElementsAre(0, 0));
+ // S1 Key frame.
+ EXPECT_THAT(calculator.From(2, {false, true}), ElementsAre(1, 1));
+ // S0T1 delta frame.
+ EXPECT_THAT(calculator.From(3, {false, false}), ElementsAre(2, 1));
+ // S1T1 delta frame.
+ EXPECT_THAT(calculator.From(4, {false, false}), ElementsAre(3, 2));
+ // S0T0 delta frame.
+ EXPECT_THAT(calculator.From(5, {true, false}), ElementsAre(4, 3));
+ // S1T0 delta frame.
+ EXPECT_THAT(calculator.From(6, {false, true}), ElementsAre(1, 4));
+}
+
+TEST(ChainDiffCalculatorTest, TwoChainsSimulcast) {
+ // Simulate a simulcast stream with 2 spatial and 2 temporal layers.
+ // Chains protect temporal layer 0.
+ ChainDiffCalculator calculator;
+ // S0 Key frame.
+ calculator.Reset({true, false});
+ EXPECT_THAT(calculator.From(1, {true, false}), ElementsAre(0, 0));
+ // S1 Key frame.
+ calculator.Reset({false, true});
+ EXPECT_THAT(calculator.From(2, {false, true}), ElementsAre(1, 0));
+ // S0T1 delta frame.
+ EXPECT_THAT(calculator.From(3, {false, false}), ElementsAre(2, 1));
+ // S1T1 delta frame.
+ EXPECT_THAT(calculator.From(4, {false, false}), ElementsAre(3, 2));
+ // S0T0 delta frame.
+ EXPECT_THAT(calculator.From(5, {true, false}), ElementsAre(4, 3));
+ // S1T0 delta frame.
+ EXPECT_THAT(calculator.From(6, {false, true}), ElementsAre(1, 4));
+}
+
+TEST(ChainDiffCalculatorTest, ResilientToAbsentChainConfig) {
+ ChainDiffCalculator calculator;
+ // Key frame.
+ calculator.Reset({true, false});
+ EXPECT_THAT(calculator.From(1, {true, false}), ElementsAre(0, 0));
+ // Chains were not set; the calculator should still return 2 chain_diffs.
+ EXPECT_THAT(calculator.From(2, {}), ElementsAre(1, 0));
+ // Chain diffs for the next frame(s) are undefined, but there should still
+ // be the correct number of them.
+ EXPECT_THAT(calculator.From(3, {true, false}), SizeIs(2));
+ EXPECT_THAT(calculator.From(4, {false, true}), SizeIs(2));
+ // Since the previous two frames updated all the chains, the expected
+ // chain_diffs are known.
+ EXPECT_THAT(calculator.From(5, {false, false}), ElementsAre(2, 1));
+}
+
+TEST(ChainDiffCalculatorTest, ResilientToTooManyChains) {
+ ChainDiffCalculator calculator;
+ // Key frame.
+ calculator.Reset({true, false});
+ EXPECT_THAT(calculator.From(1, {true, false}), ElementsAre(0, 0));
+ // Set the wrong number of chains. Expect the number of chain_diffs to be
+ // unchanged.
+ EXPECT_THAT(calculator.From(2, {true, true, true}), ElementsAre(1, 0));
+ // Chain diffs for the next frame(s) are undefined, but there should still
+ // be the correct number of them.
+ EXPECT_THAT(calculator.From(3, {true, false}), SizeIs(2));
+ EXPECT_THAT(calculator.From(4, {false, true}), SizeIs(2));
+ // Since the previous two frames updated all the chains, the expected
+ // chain_diffs are known.
+ EXPECT_THAT(calculator.From(5, {false, false}), ElementsAre(2, 1));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build
new file mode 100644
index 0000000000..7c87e340af
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codec_globals_headers_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("codec_globals_headers_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn b/third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn
new file mode 100644
index 0000000000..610f958ad1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/BUILD.gn
@@ -0,0 +1,110 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//third_party/libaom/options.gni")
+import("../../../../webrtc.gni")
+
+rtc_library("av1_svc_config") {
+ sources = [
+ "av1_svc_config.cc",
+ "av1_svc_config.h",
+ ]
+ deps = [
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:logging",
+ "../../../../rtc_base:stringutils",
+ "../../svc:scalability_mode_util",
+ "../../svc:scalability_structures",
+ "../../svc:scalable_video_controller",
+ ]
+
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+rtc_library("dav1d_decoder") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ public = [ "dav1d_decoder.h" ]
+ sources = [ "dav1d_decoder.cc" ]
+
+ deps = [
+ "../..:video_codec_interface",
+ "../../../../api:scoped_refptr",
+ "../../../../api/video:encoded_image",
+ "../../../../api/video:video_frame",
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../common_video",
+ "../../../../rtc_base:logging",
+ "//third_party/dav1d",
+ "//third_party/libyuv",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("libaom_av1_encoder") {
+ visibility = [ "*" ]
+ poisonous = [ "software_video_codecs" ]
+ public = [ "libaom_av1_encoder.h" ]
+ sources = [ "libaom_av1_encoder.cc" ]
+ deps = [
+ "../..:video_codec_interface",
+ "../../../../api:scoped_refptr",
+ "../../../../api/video:encoded_image",
+ "../../../../api/video:video_frame",
+ "../../../../api/video_codecs:scalability_mode",
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../common_video",
+ "../../../../rtc_base:checks",
+ "../../../../rtc_base:logging",
+ "../../../../rtc_base:rtc_numerics",
+ "../../svc:scalability_structures",
+ "../../svc:scalable_video_controller",
+ "//third_party/libaom",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_coding_codecs_av1_tests") {
+ testonly = true
+
+ sources = [ "av1_svc_config_unittest.cc" ]
+ deps = [
+ ":av1_svc_config",
+ "../../../../api/video_codecs:video_codecs_api",
+ "../../../../test:test_support",
+ ]
+
+ if (enable_libaom) {
+ sources += [
+ "libaom_av1_encoder_unittest.cc",
+ "libaom_av1_unittest.cc",
+ ]
+ deps += [
+ ":dav1d_decoder",
+ ":libaom_av1_encoder",
+ "../..:encoded_video_frame_producer",
+ "../..:video_codec_interface",
+ "../../../../api:mock_video_encoder",
+ "../../../../api/units:data_size",
+ "../../../../api/units:time_delta",
+ "../../../../api/video:video_frame",
+ "../../svc:scalability_mode_util",
+ "../../svc:scalability_structures",
+ "../../svc:scalable_video_controller",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS b/third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS
new file mode 100644
index 0000000000..bfb1c733d4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+third_party/libaom",
+ "+third_party/dav1d",
+]
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc
new file mode 100644
index 0000000000..43dcf96ab7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/av1_svc_config.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
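+// Builds a scalability mode name from the layer counts, e.g.
+// (num_temporal_layers=3, num_spatial_layers=2) -> "L2T3_KEY" and
+// (num_temporal_layers=1, num_spatial_layers=1) -> "L1T1".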
+absl::optional<ScalabilityMode> BuildScalabilityMode(int num_temporal_layers,
+ int num_spatial_layers) {
+ char name[20];
+ rtc::SimpleStringBuilder ss(name);
+ ss << "L" << num_spatial_layers << "T" << num_temporal_layers;
+ if (num_spatial_layers > 1) {
+ ss << "_KEY";
+ }
+
+ return ScalabilityModeFromString(name);
+}
+} // namespace
+
+absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
+LibaomAv1EncoderSupportedScalabilityModes() {
+ absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
+ for (ScalabilityMode scalability_mode : kAllScalabilityModes) {
+ if (ScalabilityStructureConfig(scalability_mode) != absl::nullopt) {
+ scalability_modes.push_back(scalability_mode);
+ }
+ }
+ return scalability_modes;
+}
+
+bool LibaomAv1EncoderSupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ // For libaom AV1, the scalability mode is supported if we can create the
+ // scalability structure.
+ return ScalabilityStructureConfig(scalability_mode) != absl::nullopt;
+}
+
+bool SetAv1SvcConfig(VideoCodec& video_codec,
+ int num_temporal_layers,
+ int num_spatial_layers) {
+ RTC_DCHECK_EQ(video_codec.codecType, kVideoCodecAV1);
+
+ absl::optional<ScalabilityMode> scalability_mode =
+ video_codec.GetScalabilityMode();
+ if (!scalability_mode.has_value()) {
+ scalability_mode =
+ BuildScalabilityMode(num_temporal_layers, num_spatial_layers);
+ if (!scalability_mode) {
+ RTC_LOG(LS_WARNING) << "Scalability mode is not set and could not be "
+                        "derived from the layer counts; using 'L1T1'.";
+ scalability_mode = ScalabilityMode::kL1T1;
+ }
+ }
+
+ std::unique_ptr<ScalableVideoController> structure =
+ CreateScalabilityStructure(*scalability_mode);
+ if (structure == nullptr) {
+ RTC_LOG(LS_WARNING) << "Failed to create structure "
+ << static_cast<int>(*scalability_mode);
+ return false;
+ }
+
+ video_codec.SetScalabilityMode(*scalability_mode);
+
+ ScalableVideoController::StreamLayersConfig info = structure->StreamConfig();
+ for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) {
+ SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx];
+ spatial_layer.width = video_codec.width * info.scaling_factor_num[sl_idx] /
+ info.scaling_factor_den[sl_idx];
+ spatial_layer.height = video_codec.height *
+ info.scaling_factor_num[sl_idx] /
+ info.scaling_factor_den[sl_idx];
+ spatial_layer.maxFramerate = video_codec.maxFramerate;
+ spatial_layer.numberOfTemporalLayers = info.num_temporal_layers;
+ spatial_layer.active = true;
+ }
+
+ if (info.num_spatial_layers == 1) {
+ SpatialLayer& spatial_layer = video_codec.spatialLayers[0];
+ spatial_layer.minBitrate = video_codec.minBitrate;
+ spatial_layer.maxBitrate = video_codec.maxBitrate;
+ spatial_layer.targetBitrate =
+ (video_codec.minBitrate + video_codec.maxBitrate) / 2;
+ return true;
+ }
+
+ for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) {
+ SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx];
+ // The minBitrate and maxBitrate formulas are copied from the VP9 settings
+ // and are not yet tuned for AV1.
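+ // For example, a 640x360 layer (230'400 pixels) gives
+ // min = (600 * 480 - 95'000) / 1000 = 193 kbps,
+ // max = 50 + (int)(1.6 * 230'400 / 1000) = 418 kbps,
+ // target = (193 + 418) / 2 = 305 kbps.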
+ const int num_pixels = spatial_layer.width * spatial_layer.height;
+ int min_bitrate_kbps = (600.0 * std::sqrt(num_pixels) - 95'000.0) / 1000.0;
+ spatial_layer.minBitrate = std::max(min_bitrate_kbps, 20);
+ spatial_layer.maxBitrate = 50 + static_cast<int>(1.6 * num_pixels / 1000.0);
+ spatial_layer.targetBitrate =
+ (spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h
new file mode 100644
index 0000000000..05b886b9f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
+LibaomAv1EncoderSupportedScalabilityModes();
+
+bool LibaomAv1EncoderSupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+// Fills `video_codec.spatialLayers` using other members.
+bool SetAv1SvcConfig(VideoCodec& video_codec,
+ int num_temporal_layers,
+ int num_spatial_layers);
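+//
+// A minimal usage sketch (illustrative only; the layer-count arguments are
+// ignored when a scalability mode is already set):
+//   VideoCodec codec;
+//   codec.codecType = kVideoCodecAV1;
+//   codec.width = 1280;
+//   codec.height = 720;
+//   codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+//   bool ok = SetAv1SvcConfig(codec, /*num_temporal_layers=*/0,
+//                             /*num_spatial_layers=*/0);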
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build
new file mode 100644
index 0000000000..cc297017e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("av1_svc_config_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc
new file mode 100644
index 0000000000..9f1da9865c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/av1_svc_config.h"
+
+#include "api/video_codecs/video_codec.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kDontCare = 0;
+
+TEST(Av1SvcConfigTest, TreatsEmptyAsL1T1) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_TRUE(video_codec.spatialLayers[0].active);
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 1);
+ EXPECT_FALSE(video_codec.spatialLayers[1].active);
+}
+
+TEST(Av1SvcConfigTest, ScalabilityModeFromNumberOfTemporalLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/3,
+ /*num_spatial_layers=*/1));
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
+}
+
+TEST(Av1SvcConfigTest, ScalabilityModeFromNumberOfSpatialLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/3,
+ /*num_spatial_layers=*/2));
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
+ EXPECT_TRUE(video_codec.spatialLayers[0].active);
+ EXPECT_TRUE(video_codec.spatialLayers[1].active);
+ EXPECT_FALSE(video_codec.spatialLayers[2].active);
+}
+
+TEST(Av1SvcConfigTest, SetsActiveSpatialLayersFromScalabilityMode) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_TRUE(video_codec.spatialLayers[0].active);
+ EXPECT_TRUE(video_codec.spatialLayers[1].active);
+ EXPECT_FALSE(video_codec.spatialLayers[2].active);
+}
+
+TEST(Av1SvcConfigTest, ConfiguresDoubleResolutionRatioFromScalabilityMode) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+ video_codec.width = 1200;
+ video_codec.height = 800;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].width, 600);
+ EXPECT_EQ(video_codec.spatialLayers[0].height, 400);
+ EXPECT_EQ(video_codec.spatialLayers[1].width, 1200);
+ EXPECT_EQ(video_codec.spatialLayers[1].height, 800);
+}
+
+TEST(Av1SvcConfigTest, ConfiguresSmallResolutionRatioFromScalabilityMode) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ // h mode uses 1.5:1 ratio
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1h);
+ video_codec.width = 1500;
+ video_codec.height = 900;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].width, 1000);
+ EXPECT_EQ(video_codec.spatialLayers[0].height, 600);
+ EXPECT_EQ(video_codec.spatialLayers[1].width, 1500);
+ EXPECT_EQ(video_codec.spatialLayers[1].height, 900);
+}
+
+TEST(Av1SvcConfigTest, CopiesFramerate) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+ video_codec.maxFramerate = 27;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].maxFramerate, 27);
+ EXPECT_EQ(video_codec.spatialLayers[1].maxFramerate, 27);
+}
+
+TEST(Av1SvcConfigTest, SetsNumberOfTemporalLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL1T3);
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
+}
+
+TEST(Av1SvcConfigTest, CopiesMinMaxBitrateForSingleSpatialLayer) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL1T3);
+ video_codec.minBitrate = 100;
+ video_codec.maxBitrate = 500;
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_EQ(video_codec.spatialLayers[0].minBitrate, 100u);
+ EXPECT_EQ(video_codec.spatialLayers[0].maxBitrate, 500u);
+ EXPECT_LE(video_codec.spatialLayers[0].minBitrate,
+ video_codec.spatialLayers[0].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[0].targetBitrate,
+ video_codec.spatialLayers[0].maxBitrate);
+}
+
+TEST(Av1SvcConfigTest, SetsBitratesForMultipleSpatialLayers) {
+ VideoCodec video_codec;
+ video_codec.codecType = kVideoCodecAV1;
+ video_codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+
+ EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
+ /*num_spatial_layers=*/kDontCare));
+
+ EXPECT_GT(video_codec.spatialLayers[0].minBitrate, 0u);
+ EXPECT_LE(video_codec.spatialLayers[0].minBitrate,
+ video_codec.spatialLayers[0].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[0].targetBitrate,
+ video_codec.spatialLayers[0].maxBitrate);
+
+ EXPECT_GT(video_codec.spatialLayers[1].minBitrate, 0u);
+ EXPECT_LE(video_codec.spatialLayers[1].minBitrate,
+ video_codec.spatialLayers[1].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[1].targetBitrate,
+ video_codec.spatialLayers[1].maxBitrate);
+
+ EXPECT_GT(video_codec.spatialLayers[2].minBitrate, 0u);
+ EXPECT_LE(video_codec.spatialLayers[2].minBitrate,
+ video_codec.spatialLayers[2].targetBitrate);
+ EXPECT_LE(video_codec.spatialLayers[2].targetBitrate,
+ video_codec.spatialLayers[2].maxBitrate);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc
new file mode 100644
index 0000000000..a2cd6d868c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/dav1d_decoder.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/logging.h"
+#include "third_party/dav1d/libdav1d/include/dav1d/dav1d.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "third_party/libyuv/include/libyuv/planar_functions.h"
+
+namespace webrtc {
+namespace {
+
+class Dav1dDecoder : public VideoDecoder {
+ public:
+ Dav1dDecoder();
+ Dav1dDecoder(const Dav1dDecoder&) = delete;
+ Dav1dDecoder& operator=(const Dav1dDecoder&) = delete;
+
+ ~Dav1dDecoder() override;
+
+ bool Configure(const Settings& settings) override;
+ int32_t Decode(const EncodedImage& encoded_image,
+ bool missing_frames,
+ int64_t render_time_ms) override;
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+ int32_t Release() override;
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ private:
+ Dav1dContext* context_ = nullptr;
+ DecodedImageCallback* decode_complete_callback_ = nullptr;
+};
+
+class ScopedDav1dData {
+ public:
+ ~ScopedDav1dData() { dav1d_data_unref(&data_); }
+
+ Dav1dData& Data() { return data_; }
+
+ private:
+ Dav1dData data_ = {};
+};
+
+class ScopedDav1dPicture
+ : public rtc::RefCountedNonVirtual<ScopedDav1dPicture> {
+ public:
+ ~ScopedDav1dPicture() { dav1d_picture_unref(&picture_); }
+
+ Dav1dPicture& Picture() { return picture_; }
+ using rtc::RefCountedNonVirtual<ScopedDav1dPicture>::HasOneRef;
+
+ private:
+ Dav1dPicture picture_ = {};
+};
+
+constexpr char kDav1dName[] = "dav1d";
+
+// Calling `dav1d_data_wrap` requires a `free_callback` to be registered.
+void NullFreeCallback(const uint8_t* buffer, void* opaque) {}
+
+Dav1dDecoder::Dav1dDecoder() = default;
+
+Dav1dDecoder::~Dav1dDecoder() {
+ Release();
+}
+
+bool Dav1dDecoder::Configure(const Settings& settings) {
+ Dav1dSettings s;
+ dav1d_default_settings(&s);
+
+ s.n_threads = std::max(2, settings.number_of_cores());
+ s.max_frame_delay = 1; // For low latency decoding.
+ s.all_layers = 0; // Don't output a frame for every spatial layer.
+ s.operating_point = 31; // Decode all operating points.
+
+ return dav1d_open(&context_, &s) == 0;
+}
+
+int32_t Dav1dDecoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* decode_complete_callback) {
+ decode_complete_callback_ = decode_complete_callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t Dav1dDecoder::Release() {
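+  // dav1d_close() frees the decoder and resets `context_` to null, so a
+  // non-null `context_` afterwards indicates the close failed.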
+ dav1d_close(&context_);
+ if (context_ != nullptr) {
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoDecoder::DecoderInfo Dav1dDecoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = kDav1dName;
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* Dav1dDecoder::ImplementationName() const {
+ return kDav1dName;
+}
+
+int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image,
+ bool /*missing_frames*/,
+ int64_t /*render_time_ms*/) {
+ if (!context_ || decode_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ ScopedDav1dData scoped_dav1d_data;
+ Dav1dData& dav1d_data = scoped_dav1d_data.Data();
+ dav1d_data_wrap(&dav1d_data, encoded_image.data(), encoded_image.size(),
+ /*free_callback=*/&NullFreeCallback,
+ /*user_data=*/nullptr);
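+  // Note: the payload is wrapped rather than copied; the no-op free callback
+  // means dav1d releases nothing when it unrefs the data, so `encoded_image`
+  // must outlive the decode of this frame (it does: the picture is retrieved
+  // below, within this call).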
+
+ if (int decode_res = dav1d_send_data(context_, &dav1d_data)) {
+ RTC_LOG(LS_WARNING)
+ << "Dav1dDecoder::Decode decoding failed with error code "
+ << decode_res;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<ScopedDav1dPicture> scoped_dav1d_picture(
+ new ScopedDav1dPicture{});
+ Dav1dPicture& dav1d_picture = scoped_dav1d_picture->Picture();
+ if (int get_picture_res = dav1d_get_picture(context_, &dav1d_picture)) {
+ RTC_LOG(LS_WARNING)
+ << "Dav1dDecoder::Decode getting picture failed with error code "
+ << get_picture_res;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (dav1d_picture.p.bpc != 8) {
+ // Only accept 8 bit depth.
+ RTC_LOG(LS_ERROR) << "Dav1dDecoder::Decode unhandled bit depth: "
+ << dav1d_picture.p.bpc;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<VideoFrameBuffer> wrapped_buffer;
+ if (dav1d_picture.p.layout == DAV1D_PIXEL_LAYOUT_I420) {
+ wrapped_buffer = WrapI420Buffer(
+ dav1d_picture.p.w, dav1d_picture.p.h,
+ static_cast<uint8_t*>(dav1d_picture.data[0]), dav1d_picture.stride[0],
+ static_cast<uint8_t*>(dav1d_picture.data[1]), dav1d_picture.stride[1],
+ static_cast<uint8_t*>(dav1d_picture.data[2]), dav1d_picture.stride[1],
+ // To keep |scoped_dav1d_picture.Picture()| alive
+ [scoped_dav1d_picture] {});
+ } else if (dav1d_picture.p.layout == DAV1D_PIXEL_LAYOUT_I444) {
+ wrapped_buffer = WrapI444Buffer(
+ dav1d_picture.p.w, dav1d_picture.p.h,
+ static_cast<uint8_t*>(dav1d_picture.data[0]), dav1d_picture.stride[0],
+ static_cast<uint8_t*>(dav1d_picture.data[1]), dav1d_picture.stride[1],
+ static_cast<uint8_t*>(dav1d_picture.data[2]), dav1d_picture.stride[1],
+ // To keep |scoped_dav1d_picture.Picture()| alive
+ [scoped_dav1d_picture] {});
+ } else {
+ // Only accept I420 or I444 pixel format.
+ RTC_LOG(LS_ERROR) << "Dav1dDecoder::Decode unhandled pixel layout: "
+ << dav1d_picture.p.layout;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (!wrapped_buffer.get()) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ VideoFrame decoded_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(wrapped_buffer)
+ .set_timestamp_rtp(encoded_image.Timestamp())
+ .set_ntp_time_ms(encoded_image.ntp_time_ms_)
+ .set_color_space(encoded_image.ColorSpace())
+ .build();
+
+ decode_complete_callback_->Decoded(decoded_frame, absl::nullopt,
+ absl::nullopt);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+} // namespace
+
+std::unique_ptr<VideoDecoder> CreateDav1dDecoder() {
+ return std::make_unique<Dav1dDecoder>();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h
new file mode 100644
index 0000000000..c9396d1e03
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/dav1d_decoder.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder.h"
+
+namespace webrtc {
+
+std::unique_ptr<VideoDecoder> CreateDav1dDecoder();
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
new file mode 100644
index 0000000000..2713171616
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
@@ -0,0 +1,825 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cmath>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "third_party/libaom/source/libaom/aom/aom_codec.h"
+#include "third_party/libaom/source/libaom/aom/aom_encoder.h"
+#include "third_party/libaom/source/libaom/aom/aomcx.h"
+
+#define SET_ENCODER_PARAM_OR_RETURN_ERROR(param_id, param_value) \
+ do { \
+ if (!SetEncoderControlParameters(param_id, param_value)) { \
+ return WEBRTC_VIDEO_CODEC_ERROR; \
+ } \
+ } while (0)
+
+namespace webrtc {
+namespace {
+
+// Encoder configuration parameters
+constexpr int kQpMin = 10;
+constexpr int kUsageProfile = AOM_USAGE_REALTIME;
+constexpr int kMinQindex = 145; // Min qindex threshold for QP scaling.
+constexpr int kMaxQindex = 205; // Max qindex threshold for QP scaling.
+constexpr int kBitDepth = 8;
+constexpr int kLagInFrames = 0; // No look ahead.
+constexpr int kRtpTicksPerSecond = 90000;
+constexpr float kMinimumFrameRate = 1.0;
+
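+// A plausible rationale for the thresholds below: with 4+ threads at roughly
+// 540p-1080p, a fixed 64x64 superblock size gives row/tile multithreading
+// more units to distribute; otherwise libaom's dynamic choice is kept.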
+aom_superblock_size_t GetSuperblockSize(int width, int height, int threads) {
+ int resolution = width * height;
+ if (threads >= 4 && resolution >= 960 * 540 && resolution < 1920 * 1080)
+ return AOM_SUPERBLOCK_SIZE_64X64;
+ else
+ return AOM_SUPERBLOCK_SIZE_DYNAMIC;
+}
+
+class LibaomAv1Encoder final : public VideoEncoder {
+ public:
+ explicit LibaomAv1Encoder(
+ const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config);
+ ~LibaomAv1Encoder();
+
+ int InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) override;
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* encoded_image_callback) override;
+
+ int32_t Release() override;
+
+ int32_t Encode(const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ void SetRates(const RateControlParameters& parameters) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ private:
+ template <typename P>
+ bool SetEncoderControlParameters(int param_id, P param_value);
+
+ // Get value to be used for encoder cpu_speed setting
+ int GetCpuSpeed(int width, int height);
+
+ // Determine number of encoder threads to use.
+ int NumberOfThreads(int width, int height, int number_of_cores);
+
+ bool SvcEnabled() const { return svc_params_.has_value(); }
+  // Fills the `svc_params_` member. Returns false on error.
+ bool SetSvcParams(ScalableVideoController::StreamLayersConfig svc_config);
+ // Configures the encoder with layer for the next frame.
+ void SetSvcLayerId(
+ const ScalableVideoController::LayerFrameConfig& layer_frame);
+ // Configures the encoder which buffers next frame updates and can reference.
+ void SetSvcRefFrameConfig(
+ const ScalableVideoController::LayerFrameConfig& layer_frame);
+ // If pixel format doesn't match, then reallocate.
+ void MaybeRewrapImgWithFormat(const aom_img_fmt_t fmt);
+
+ std::unique_ptr<ScalableVideoController> svc_controller_;
+ absl::optional<ScalabilityMode> scalability_mode_;
+ bool inited_;
+ bool rates_configured_;
+ absl::optional<aom_svc_params_t> svc_params_;
+ VideoCodec encoder_settings_;
+ absl::optional<LibaomAv1EncoderAuxConfig> aux_config_;
+ aom_image_t* frame_for_encode_;
+ aom_codec_ctx_t ctx_;
+ aom_codec_enc_cfg_t cfg_;
+ EncodedImageCallback* encoded_image_callback_;
+ SeqNumUnwrapper<uint32_t> rtp_timestamp_unwrapper_;
+};
+
+int32_t VerifyCodecSettings(const VideoCodec& codec_settings) {
+ if (codec_settings.width < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.height < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // maxBitrate == 0 represents an unspecified maxBitRate.
+ if (codec_settings.maxBitrate > 0 &&
+ codec_settings.minBitrate > codec_settings.maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.maxBitrate > 0 &&
+ codec_settings.startBitrate > codec_settings.maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.startBitrate < codec_settings.minBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings.maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+LibaomAv1Encoder::LibaomAv1Encoder(
+ const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config)
+ : inited_(false),
+ rates_configured_(false),
+ aux_config_(aux_config),
+ frame_for_encode_(nullptr),
+ encoded_image_callback_(nullptr) {}
+
+LibaomAv1Encoder::~LibaomAv1Encoder() {
+ Release();
+}
+
+int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) {
+ if (codec_settings == nullptr) {
+ RTC_LOG(LS_WARNING) << "No codec settings provided to "
+ "LibaomAv1Encoder.";
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inited_) {
+ RTC_LOG(LS_WARNING) << "Initing LibaomAv1Encoder without first releasing.";
+ Release();
+ }
+ encoder_settings_ = *codec_settings;
+
+ // Sanity checks for encoder configuration.
+ const int32_t result = VerifyCodecSettings(encoder_settings_);
+ if (result < 0) {
+ RTC_LOG(LS_WARNING) << "Incorrect codec settings provided to "
+ "LibaomAv1Encoder.";
+ return result;
+ }
+  if (encoder_settings_.numberOfSimulcastStreams > 1) {
+    RTC_LOG(LS_WARNING) << "Simulcast is not implemented by LibaomAv1Encoder.";
+    return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
+  }
+ scalability_mode_ = encoder_settings_.GetScalabilityMode();
+ if (!scalability_mode_.has_value()) {
+ RTC_LOG(LS_WARNING) << "Scalability mode is not set, using 'L1T1'.";
+ scalability_mode_ = ScalabilityMode::kL1T1;
+ }
+ svc_controller_ = CreateScalabilityStructure(*scalability_mode_);
+ if (svc_controller_ == nullptr) {
+ RTC_LOG(LS_WARNING) << "Failed to set scalability mode "
+ << static_cast<int>(*scalability_mode_);
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ if (!SetSvcParams(svc_controller_->StreamConfig())) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Initialize encoder configuration structure with default values
+ aom_codec_err_t ret =
+ aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg_, kUsageProfile);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret
+ << " on aom_codec_enc_config_default.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Overwrite default config with input encoder settings & RTC-relevant values.
+ cfg_.g_w = encoder_settings_.width;
+ cfg_.g_h = encoder_settings_.height;
+ cfg_.g_threads =
+ NumberOfThreads(cfg_.g_w, cfg_.g_h, settings.number_of_cores);
+ cfg_.g_timebase.num = 1;
+ cfg_.g_timebase.den = kRtpTicksPerSecond;
+ cfg_.rc_target_bitrate = encoder_settings_.maxBitrate; // kilobits/sec.
+ cfg_.g_input_bit_depth = kBitDepth;
+ cfg_.kf_mode = AOM_KF_DISABLED;
+ cfg_.rc_min_quantizer = kQpMin;
+ cfg_.rc_max_quantizer = encoder_settings_.qpMax;
+ cfg_.rc_undershoot_pct = 50;
+ cfg_.rc_overshoot_pct = 50;
+ cfg_.rc_buf_initial_sz = 600;
+ cfg_.rc_buf_optimal_sz = 600;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.g_usage = kUsageProfile;
+ cfg_.g_error_resilient = 0;
+ // Low-latency settings.
+ cfg_.rc_end_usage = AOM_CBR; // Constant Bit Rate (CBR) mode
+ cfg_.g_pass = AOM_RC_ONE_PASS; // One-pass rate control
+ cfg_.g_lag_in_frames = kLagInFrames; // No look ahead when lag equals 0.
+
+ if (frame_for_encode_ != nullptr) {
+ aom_img_free(frame_for_encode_);
+ frame_for_encode_ = nullptr;
+ }
+
+ // Flag options: AOM_CODEC_USE_PSNR and AOM_CODEC_USE_HIGHBITDEPTH
+ aom_codec_flags_t flags = 0;
+
+ // Initialize an encoder instance.
+ ret = aom_codec_enc_init(&ctx_, aom_codec_av1_cx(), &cfg_, flags);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret
+ << " on aom_codec_enc_init.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ inited_ = true;
+
+ // Set control parameters
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_SET_CPUUSED,
+ GetCpuSpeed(cfg_.g_w, cfg_.g_h));
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_CDEF, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_TPL_MODEL, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_DELTAQ_MODE, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_ORDER_HINT, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_AQ_MODE, 3);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_SET_MAX_INTRA_BITRATE_PCT, 300);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_COEFF_COST_UPD_FREQ, 3);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MODE_COST_UPD_FREQ, 3);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MV_COST_UPD_FREQ, 3);
+
+ if (codec_settings->mode == VideoCodecMode::kScreensharing) {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TUNE_CONTENT,
+ AOM_CONTENT_SCREEN);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 1);
+ } else {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 0);
+ }
+
+ if (cfg_.g_threads == 4 && cfg_.g_w == 640 &&
+ (cfg_.g_h == 360 || cfg_.g_h == 480)) {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_ROWS,
+ static_cast<int>(log2(cfg_.g_threads)));
+ } else {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_COLUMNS,
+ static_cast<int>(log2(cfg_.g_threads)));
+ }
+
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ROW_MT, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_OBMC, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_NOISE_SENSITIVITY, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_WARPED_MOTION, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_GLOBAL_MOTION, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_REF_FRAME_MVS, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(
+ AV1E_SET_SUPERBLOCK_SIZE,
+ GetSuperblockSize(cfg_.g_w, cfg_.g_h, cfg_.g_threads));
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_CFL_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_SMOOTH_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_ANGLE_DELTA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_FILTER_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_INTRA_DEFAULT_TX_ONLY, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_DISABLE_TRELLIS_QUANT, 1);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DIST_WTD_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DIFF_WTD_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DUAL_FILTER, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTERINTRA_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTERINTRA_WEDGE, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTRA_EDGE_FILTER, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTRABC, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_MASKED_COMP, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PAETH_INTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_QM, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_RECT_PARTITIONS, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_RESTORATION, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_SMOOTH_INTERINTRA, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_TX64, 0);
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MAX_REFERENCE_FRAMES, 3);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+template <typename P>
+bool LibaomAv1Encoder::SetEncoderControlParameters(int param_id,
+ P param_value) {
+ aom_codec_err_t error_code = aom_codec_control(&ctx_, param_id, param_value);
+ if (error_code != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING)
+ << "LibaomAv1Encoder::SetEncoderControlParameters returned "
+ << error_code << " on id: " << param_id << ".";
+ }
+ return error_code == AOM_CODEC_OK;
+}
+
+// Only positive speeds; the range used for real-time coding here is 6 - 10.
+// Lower means slower/better quality, higher means faster/lower quality.
+int LibaomAv1Encoder::GetCpuSpeed(int width, int height) {
+ if (aux_config_) {
+ if (auto it = aux_config_->max_pixel_count_to_cpu_speed.lower_bound(width *
+ height);
+ it != aux_config_->max_pixel_count_to_cpu_speed.end()) {
+ return it->second;
+ }
+
+ return 10;
+ } else {
+ // For smaller resolutions, use lower speed setting (get some coding gain at
+ // the cost of increased encoding complexity).
+ switch (encoder_settings_.GetVideoEncoderComplexity()) {
+ case VideoCodecComplexity::kComplexityHigh:
+ if (width * height <= 320 * 180)
+ return 8;
+ else if (width * height <= 640 * 360)
+ return 9;
+ else
+ return 10;
+ case VideoCodecComplexity::kComplexityHigher:
+ if (width * height <= 320 * 180)
+ return 7;
+ else if (width * height <= 640 * 360)
+ return 8;
+ else if (width * height <= 1280 * 720)
+ return 9;
+ else
+ return 10;
+ case VideoCodecComplexity::kComplexityMax:
+ if (width * height <= 320 * 180)
+ return 6;
+ else if (width * height <= 640 * 360)
+ return 7;
+ else if (width * height <= 1280 * 720)
+ return 8;
+ else
+ return 9;
+ default:
+ return 10;
+ }
+ }
+}
+
+int LibaomAv1Encoder::NumberOfThreads(int width,
+ int height,
+ int number_of_cores) {
+  // Keep the number of encoder threads equal to the possible number of
+  // column/row tiles, which is (1, 2, 4, 8). See the
+  // AV1E_SET_TILE_COLUMNS/ROWS settings in InitEncode().
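+  // E.g. 640x360 with 8 cores -> 4 threads, 320x180 with 4 cores -> 2
+  // threads, anything smaller (or with fewer cores) -> 1 thread.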
+ if (width * height >= 640 * 360 && number_of_cores > 4) {
+ return 4;
+ } else if (width * height >= 320 * 180 && number_of_cores > 2) {
+ return 2;
+ } else {
+// Use 2 threads for low res on ARM.
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID)
+ if (width * height >= 320 * 180 && number_of_cores > 2) {
+ return 2;
+ }
+#endif
+    // Use 1 thread below VGA (or when only a couple of cores are available).
+ return 1;
+ }
+}
+
+bool LibaomAv1Encoder::SetSvcParams(
+ ScalableVideoController::StreamLayersConfig svc_config) {
+ bool svc_enabled =
+ svc_config.num_spatial_layers > 1 || svc_config.num_temporal_layers > 1;
+ if (!svc_enabled) {
+ svc_params_ = absl::nullopt;
+ return true;
+ }
+ if (svc_config.num_spatial_layers < 1 || svc_config.num_spatial_layers > 4) {
+ RTC_LOG(LS_WARNING) << "Av1 supports up to 4 spatial layers. "
+ << svc_config.num_spatial_layers << " configured.";
+ return false;
+ }
+ if (svc_config.num_temporal_layers < 1 ||
+ svc_config.num_temporal_layers > 8) {
+ RTC_LOG(LS_WARNING) << "Av1 supports up to 8 temporal layers. "
+ << svc_config.num_temporal_layers << " configured.";
+ return false;
+ }
+ aom_svc_params_t& svc_params = svc_params_.emplace();
+ svc_params.number_spatial_layers = svc_config.num_spatial_layers;
+ svc_params.number_temporal_layers = svc_config.num_temporal_layers;
+
+ int num_layers =
+ svc_config.num_spatial_layers * svc_config.num_temporal_layers;
+ for (int i = 0; i < num_layers; ++i) {
+ svc_params.min_quantizers[i] = kQpMin;
+ svc_params.max_quantizers[i] = encoder_settings_.qpMax;
+ }
+
+ // Assume each temporal layer doubles framerate.
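+  // E.g. with 3 temporal layers the factors are {4, 2, 1}: T0 alone runs at
+  // 1/4 of the full framerate, T0+T1 at 1/2, and T0+T1+T2 at the full rate.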
+ for (int tid = 0; tid < svc_config.num_temporal_layers; ++tid) {
+ svc_params.framerate_factor[tid] =
+ 1 << (svc_config.num_temporal_layers - tid - 1);
+ }
+
+ for (int sid = 0; sid < svc_config.num_spatial_layers; ++sid) {
+ svc_params.scaling_factor_num[sid] = svc_config.scaling_factor_num[sid];
+ svc_params.scaling_factor_den[sid] = svc_config.scaling_factor_den[sid];
+ }
+
+ return true;
+}
+
+void LibaomAv1Encoder::SetSvcLayerId(
+ const ScalableVideoController::LayerFrameConfig& layer_frame) {
+ aom_svc_layer_id_t layer_id = {};
+ layer_id.spatial_layer_id = layer_frame.SpatialId();
+ layer_id.temporal_layer_id = layer_frame.TemporalId();
+ SetEncoderControlParameters(AV1E_SET_SVC_LAYER_ID, &layer_id);
+}
+
+void LibaomAv1Encoder::SetSvcRefFrameConfig(
+ const ScalableVideoController::LayerFrameConfig& layer_frame) {
+  // Buffer name to use for each layer_frame.buffers position. In particular,
+  // when 2 buffers are referenced, prefer to name them last and golden,
+  // because the av1 bitstream format has dedicated fields for these two names.
+ // See last_frame_idx and golden_frame_idx in the av1 spec
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf
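+  // E.g. a frame referencing buffers {2, 5} maps buffer 2 to LAST and
+  // buffer 5 to GOLDEN via `ref_idx` below.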
+  static constexpr int kPreferredSlotName[] = {0,  // Last
+                                               3,  // Golden
+                                               1, 2, 4, 5, 6};
+ static constexpr int kAv1NumBuffers = 8;
+
+ aom_svc_ref_frame_config_t ref_frame_config = {};
+  RTC_CHECK_LE(layer_frame.Buffers().size(),
+               ABSL_ARRAYSIZE(kPreferredSlotName));
+ for (size_t i = 0; i < layer_frame.Buffers().size(); ++i) {
+ const CodecBufferUsage& buffer = layer_frame.Buffers()[i];
+    int slot_name = kPreferredSlotName[i];
+ RTC_CHECK_GE(buffer.id, 0);
+ RTC_CHECK_LT(buffer.id, kAv1NumBuffers);
+ ref_frame_config.ref_idx[slot_name] = buffer.id;
+ if (buffer.referenced) {
+ ref_frame_config.reference[slot_name] = 1;
+ }
+ if (buffer.updated) {
+ ref_frame_config.refresh[buffer.id] = 1;
+ }
+ }
+
+ SetEncoderControlParameters(AV1E_SET_SVC_REF_FRAME_CONFIG, &ref_frame_config);
+}
+
+int32_t LibaomAv1Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* encoded_image_callback) {
+ encoded_image_callback_ = encoded_image_callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t LibaomAv1Encoder::Release() {
+ if (frame_for_encode_ != nullptr) {
+ aom_img_free(frame_for_encode_);
+ frame_for_encode_ = nullptr;
+ }
+ if (inited_) {
+ if (aom_codec_destroy(&ctx_)) {
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ inited_ = false;
+ }
+ rates_configured_ = false;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void LibaomAv1Encoder::MaybeRewrapImgWithFormat(const aom_img_fmt_t fmt) {
+ if (!frame_for_encode_) {
+ frame_for_encode_ =
+ aom_img_wrap(nullptr, fmt, cfg_.g_w, cfg_.g_h, 1, nullptr);
+
+ } else if (frame_for_encode_->fmt != fmt) {
+ RTC_LOG(LS_INFO) << "Switching AV1 encoder pixel format to "
+ << (fmt == AOM_IMG_FMT_NV12 ? "NV12" : "I420");
+ aom_img_free(frame_for_encode_);
+ frame_for_encode_ =
+ aom_img_wrap(nullptr, fmt, cfg_.g_w, cfg_.g_h, 1, nullptr);
+ }
+ // else no-op since the image is already in the right format.
+}
+
+int32_t LibaomAv1Encoder::Encode(
+ const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (!inited_ || encoded_image_callback_ == nullptr || !rates_configured_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ bool keyframe_required =
+ frame_types != nullptr &&
+ absl::c_linear_search(*frame_types, VideoFrameType::kVideoFrameKey);
+
+ std::vector<ScalableVideoController::LayerFrameConfig> layer_frames =
+ svc_controller_->NextFrameConfig(keyframe_required);
+
+ if (layer_frames.empty()) {
+ RTC_LOG(LS_ERROR) << "SVCController returned no configuration for a frame.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<VideoFrameBuffer> buffer = frame.video_frame_buffer();
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ supported_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ if (buffer->type() != VideoFrameBuffer::Type::kNative) {
+ // `buffer` is already mapped.
+ mapped_buffer = buffer;
+ } else {
+ // Attempt to map to one of the supported formats.
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
+ }
+
+ // Convert input frame to I420, if needed.
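+  // I420A passes through unconverted: the kI420A case below reads it via
+  // GetI420(), so the alpha plane is simply ignored.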
+ if (!mapped_buffer ||
+ (absl::c_find(supported_formats, mapped_buffer->type()) ==
+ supported_formats.end() &&
+ mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
+ rtc::scoped_refptr<I420BufferInterface> converted_buffer(buffer->ToI420());
+ if (!converted_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(
+ frame.video_frame_buffer()->type())
+ << " image to I420. Can't encode frame.";
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+ RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ mapped_buffer = converted_buffer;
+ }
+
+ switch (mapped_buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A: {
+ // Set frame_for_encode_ data pointers and strides.
+ MaybeRewrapImgWithFormat(AOM_IMG_FMT_I420);
+ auto i420_buffer = mapped_buffer->GetI420();
+ RTC_DCHECK(i420_buffer);
+ frame_for_encode_->planes[AOM_PLANE_Y] =
+ const_cast<unsigned char*>(i420_buffer->DataY());
+ frame_for_encode_->planes[AOM_PLANE_U] =
+ const_cast<unsigned char*>(i420_buffer->DataU());
+ frame_for_encode_->planes[AOM_PLANE_V] =
+ const_cast<unsigned char*>(i420_buffer->DataV());
+ frame_for_encode_->stride[AOM_PLANE_Y] = i420_buffer->StrideY();
+ frame_for_encode_->stride[AOM_PLANE_U] = i420_buffer->StrideU();
+ frame_for_encode_->stride[AOM_PLANE_V] = i420_buffer->StrideV();
+ break;
+ }
+ case VideoFrameBuffer::Type::kNV12: {
+ MaybeRewrapImgWithFormat(AOM_IMG_FMT_NV12);
+ const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12();
+ RTC_DCHECK(nv12_buffer);
+ frame_for_encode_->planes[AOM_PLANE_Y] =
+ const_cast<unsigned char*>(nv12_buffer->DataY());
+ frame_for_encode_->planes[AOM_PLANE_U] =
+ const_cast<unsigned char*>(nv12_buffer->DataUV());
+ frame_for_encode_->planes[AOM_PLANE_V] = nullptr;
+ frame_for_encode_->stride[AOM_PLANE_Y] = nv12_buffer->StrideY();
+ frame_for_encode_->stride[AOM_PLANE_U] = nv12_buffer->StrideUV();
+ frame_for_encode_->stride[AOM_PLANE_V] = 0;
+ break;
+ }
+ default:
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+
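+  // Frame duration in 90 kHz RTP ticks, e.g. 30 fps -> 3000 ticks per frame.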
+ const uint32_t duration =
+ kRtpTicksPerSecond / static_cast<float>(encoder_settings_.maxFramerate);
+
+ const size_t num_spatial_layers =
+ svc_params_ ? svc_params_->number_spatial_layers : 1;
+ auto next_layer_frame = layer_frames.begin();
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ // The libaom AV1 encoder requires that `aom_codec_encode` is called for
+ // every spatial layer, even if the configured bitrate for that layer is
+ // zero. For zero bitrate spatial layers no frames will be produced.
+ absl::optional<ScalableVideoController::LayerFrameConfig>
+ non_encoded_layer_frame;
+ ScalableVideoController::LayerFrameConfig* layer_frame;
+ if (next_layer_frame != layer_frames.end() &&
+ next_layer_frame->SpatialId() == static_cast<int>(i)) {
+ layer_frame = &*next_layer_frame;
+ ++next_layer_frame;
+ } else {
+ // For layers that are not encoded only the spatial id matters.
+ non_encoded_layer_frame.emplace().S(i);
+ layer_frame = &*non_encoded_layer_frame;
+ }
+ const bool end_of_picture = (next_layer_frame == layer_frames.end());
+
+ aom_enc_frame_flags_t flags =
+ layer_frame->IsKeyframe() ? AOM_EFLAG_FORCE_KF : 0;
+
+ if (SvcEnabled()) {
+ SetSvcLayerId(*layer_frame);
+ SetSvcRefFrameConfig(*layer_frame);
+
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ERROR_RESILIENT_MODE,
+ layer_frame->TemporalId() > 0 ? 1 : 0);
+ }
+
+ // Encode a frame. The presentation timestamp `pts` should never wrap, hence
+ // the unwrapping.
+ aom_codec_err_t ret = aom_codec_encode(
+ &ctx_, frame_for_encode_,
+ rtp_timestamp_unwrapper_.Unwrap(frame.timestamp()), duration, flags);
+ if (ret != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encode returned " << ret
+ << " on aom_codec_encode.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (non_encoded_layer_frame) {
+ continue;
+ }
+
+ // Get encoded image data.
+ EncodedImage encoded_image;
+ aom_codec_iter_t iter = nullptr;
+ int data_pkt_count = 0;
+ while (const aom_codec_cx_pkt_t* pkt =
+ aom_codec_get_cx_data(&ctx_, &iter)) {
+ if (pkt->kind == AOM_CODEC_CX_FRAME_PKT && pkt->data.frame.sz > 0) {
+ if (data_pkt_count > 0) {
+ RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encoder returned more than "
+ "one data packet for an input video frame.";
+ Release();
+ }
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ /*data=*/static_cast<const uint8_t*>(pkt->data.frame.buf),
+ /*size=*/pkt->data.frame.sz));
+
+ if ((pkt->data.frame.flags & AOM_EFLAG_FORCE_KF) != 0) {
+ layer_frame->Keyframe();
+ }
+
+ encoded_image._frameType = layer_frame->IsKeyframe()
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ encoded_image.SetTimestamp(frame.timestamp());
+ encoded_image.capture_time_ms_ = frame.render_time_ms();
+ encoded_image.rotation_ = frame.rotation();
+ encoded_image.content_type_ = VideoContentType::UNSPECIFIED;
+ // If encoded image width/height info are added to aom_codec_cx_pkt_t,
+ // use those values in lieu of the values in frame.
+ if (svc_params_) {
+ int n = svc_params_->scaling_factor_num[layer_frame->SpatialId()];
+ int d = svc_params_->scaling_factor_den[layer_frame->SpatialId()];
+ encoded_image._encodedWidth = cfg_.g_w * n / d;
+ encoded_image._encodedHeight = cfg_.g_h * n / d;
+ encoded_image.SetSpatialIndex(layer_frame->SpatialId());
+ encoded_image.SetTemporalIndex(layer_frame->TemporalId());
+ } else {
+ encoded_image._encodedWidth = cfg_.g_w;
+ encoded_image._encodedHeight = cfg_.g_h;
+ }
+ encoded_image.timing_.flags = VideoSendTiming::kInvalid;
+
+ int qp = -1;
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_GET_LAST_QUANTIZER, &qp);
+ encoded_image.qp_ = qp;
+
+ encoded_image.SetColorSpace(frame.color_space());
+ ++data_pkt_count;
+ }
+ }
+
+ // Deliver encoded image data.
+ if (encoded_image.size() > 0) {
+ CodecSpecificInfo codec_specific_info;
+ codec_specific_info.codecType = kVideoCodecAV1;
+ codec_specific_info.end_of_picture = end_of_picture;
+ codec_specific_info.scalability_mode = scalability_mode_;
+ bool is_keyframe = layer_frame->IsKeyframe();
+ codec_specific_info.generic_frame_info =
+ svc_controller_->OnEncodeDone(*layer_frame);
+ if (is_keyframe && codec_specific_info.generic_frame_info) {
+ codec_specific_info.template_structure =
+ svc_controller_->DependencyStructure();
+ auto& resolutions = codec_specific_info.template_structure->resolutions;
+ if (SvcEnabled()) {
+ resolutions.resize(svc_params_->number_spatial_layers);
+ for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
+ int n = svc_params_->scaling_factor_num[sid];
+ int d = svc_params_->scaling_factor_den[sid];
+ resolutions[sid] =
+ RenderResolution(cfg_.g_w * n / d, cfg_.g_h * n / d);
+ }
+ } else {
+ resolutions = {RenderResolution(cfg_.g_w, cfg_.g_h)};
+ }
+ }
+ encoded_image_callback_->OnEncodedImage(encoded_image,
+ &codec_specific_info);
+ }
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void LibaomAv1Encoder::SetRates(const RateControlParameters& parameters) {
+ if (!inited_) {
+ RTC_LOG(LS_WARNING) << "SetRates() while encoder is not initialized";
+ return;
+ }
+ if (parameters.framerate_fps < kMinimumFrameRate) {
+ RTC_LOG(LS_WARNING) << "Unsupported framerate (must be >= "
+ << kMinimumFrameRate
+ << " ): " << parameters.framerate_fps;
+ return;
+ }
+ if (parameters.bitrate.get_sum_bps() == 0) {
+ RTC_LOG(LS_WARNING) << "Attempt to set target bit rate to zero";
+ return;
+ }
+
+  // The bitrates calculated internally in libaom when `AV1E_SET_SVC_PARAMS` is
+  // called depend on the currently configured `rc_target_bitrate`. If the
+  // total target bitrate is not updated first, a division by zero could happen.
+ svc_controller_->OnRatesUpdated(parameters.bitrate);
+ cfg_.rc_target_bitrate = parameters.bitrate.get_sum_kbps();
+ aom_codec_err_t error_code = aom_codec_enc_config_set(&ctx_, &cfg_);
+ if (error_code != AOM_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Error configuring encoder, error code: "
+ << error_code;
+ }
+
+ if (SvcEnabled()) {
+ for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
+      // The libaom bitrate for spatial id S and temporal id T means the
+      // bitrate of frames with spatial_id=S and temporal_id<=T, while
+      // `parameters.bitrate` provides the bitrate of frames with spatial_id=S
+      // and temporal_id=T.
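+      // E.g. with T0=300 kbps and T1=200 kbps for a spatial layer, the
+      // accumulated targets become {300, 500} kbps.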
+ int accumulated_bitrate_bps = 0;
+ for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) {
+ int layer_index = sid * svc_params_->number_temporal_layers + tid;
+ accumulated_bitrate_bps += parameters.bitrate.GetBitrate(sid, tid);
+ // `svc_params.layer_target_bitrate` expects bitrate in kbps.
+ svc_params_->layer_target_bitrate[layer_index] =
+ accumulated_bitrate_bps / 1000;
+ }
+ }
+ SetEncoderControlParameters(AV1E_SET_SVC_PARAMS, &*svc_params_);
+ }
+
+ rates_configured_ = true;
+
+ // Set frame rate to closest integer value.
+ encoder_settings_.maxFramerate =
+ static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+}
+
+VideoEncoder::EncoderInfo LibaomAv1Encoder::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "libaom";
+ info.has_trusted_rate_controller = true;
+ info.is_hardware_accelerated = false;
+ info.scaling_settings = VideoEncoder::ScalingSettings(kMinQindex, kMaxQindex);
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+ if (SvcEnabled()) {
+ for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
+ info.fps_allocation[sid].resize(svc_params_->number_temporal_layers);
+ for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) {
+ info.fps_allocation[sid][tid] =
+ encoder_settings_.maxFramerate / svc_params_->framerate_factor[tid];
+ }
+ }
+ }
+ return info;
+}
+
+} // namespace
+
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder() {
+ return std::make_unique<LibaomAv1Encoder>(absl::nullopt);
+}
+
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder(
+ const LibaomAv1EncoderAuxConfig& aux_config) {
+ return std::make_unique<LibaomAv1Encoder>(aux_config);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h
new file mode 100644
index 0000000000..2fd1d5a754
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
+#define MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
+
+#include <map>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "api/video_codecs/video_encoder.h"
+
+namespace webrtc {
+struct LibaomAv1EncoderAuxConfig {
+ // A map of max pixel count --> cpu speed.
+ std::map<int, int> max_pixel_count_to_cpu_speed;
+};
+
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder();
+std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder(
+ const LibaomAv1EncoderAuxConfig& aux_config);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
new file mode 100644
index 0000000000..d194cef35b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+VideoCodec DefaultCodecSettings() {
+ VideoCodec codec_settings;
+ codec_settings.width = 320;
+ codec_settings.height = 180;
+ codec_settings.maxFramerate = 30;
+ codec_settings.maxBitrate = 1000;
+ codec_settings.qpMax = 63;
+ return codec_settings;
+}
+
+VideoEncoder::Settings DefaultEncoderSettings() {
+ return VideoEncoder::Settings(
+ VideoEncoder::Capabilities(/*loss_notification=*/false),
+ /*number_of_cores=*/1, /*max_payload_size=*/1200);
+}
+
+TEST(LibaomAv1EncoderTest, CanCreate) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ EXPECT_TRUE(encoder);
+}
+
+TEST(LibaomAv1EncoderTest, InitAndRelease) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ ASSERT_TRUE(encoder);
+ VideoCodec codec_settings = DefaultCodecSettings();
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ EXPECT_EQ(encoder->Release(), WEBRTC_VIDEO_CODEC_OK);
+}
+
+TEST(LibaomAv1EncoderTest, NoBitrateOnTopLayerReflectedInActiveDecodeTargets) {
+ // Configure encoder with 2 temporal layers.
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL1T2);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/0, 300'000);
+ rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/1, 0);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(1));
+ ASSERT_NE(encoded_frames[0].codec_specific_info.generic_frame_info,
+ absl::nullopt);
+ // Assuming L1T2 structure uses 1st decode target for T0 and 2nd decode target
+ // for T0+T1 frames, expect only 1st decode target is active.
+ EXPECT_EQ(encoded_frames[0]
+ .codec_specific_info.generic_frame_info->active_decode_targets,
+ 0b01);
+}
+
+TEST(LibaomAv1EncoderTest,
+ SpatialScalabilityInTemporalUnitReportedAsDeltaFrame) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/1, 0, 300'000);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(2));
+ EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameKey));
+ EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(LibaomAv1EncoderTest, NoBitrateOnTopSpatialLayerProduceDeltaFrames) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/1, 0, 0);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(2));
+ EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameKey));
+ EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(LibaomAv1EncoderTest, SetsEndOfPictureForLastFrameInTemporalUnit) {
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 30000);
+ allocation.SetBitrate(1, 0, 40000);
+ allocation.SetBitrate(2, 0, 30000);
+
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ // Configure encoder with 3 spatial layers.
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(6));
+ EXPECT_FALSE(encoded_frames[0].codec_specific_info.end_of_picture);
+ EXPECT_FALSE(encoded_frames[1].codec_specific_info.end_of_picture);
+ EXPECT_TRUE(encoded_frames[2].codec_specific_info.end_of_picture);
+ EXPECT_FALSE(encoded_frames[3].codec_specific_info.end_of_picture);
+ EXPECT_FALSE(encoded_frames[4].codec_specific_info.end_of_picture);
+ EXPECT_TRUE(encoded_frames[5].codec_specific_info.end_of_picture);
+}
+
+TEST(LibaomAv1EncoderTest, CheckOddDimensionsWithSpatialLayers) {
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 30000);
+ allocation.SetBitrate(1, 0, 40000);
+ allocation.SetBitrate(2, 0, 30000);
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ // Configure encoder with 3 spatial layers.
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
+ // Odd width and height values should not make encoder crash.
+ codec_settings.width = 623;
+ codec_settings.height = 405;
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ EncodedVideoFrameProducer evfp(*encoder);
+ evfp.SetResolution(RenderResolution{623, 405});
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ evfp.SetNumInputFrames(2).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(6));
+}
+
+TEST(LibaomAv1EncoderTest, EncoderInfoProvidesFpsAllocation) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T3);
+ codec_settings.maxFramerate = 60;
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ const auto& encoder_info = encoder->GetEncoderInfo();
+ EXPECT_THAT(encoder_info.fps_allocation[0], ElementsAre(15, 30, 60));
+ EXPECT_THAT(encoder_info.fps_allocation[1], ElementsAre(15, 30, 60));
+ EXPECT_THAT(encoder_info.fps_allocation[2], ElementsAre(15, 30, 60));
+ EXPECT_THAT(encoder_info.fps_allocation[3], IsEmpty());
+}
+
+TEST(LibaomAv1EncoderTest, PopulatesEncodedFrameSize) {
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 30000);
+ allocation.SetBitrate(1, 0, 40000);
+ allocation.SetBitrate(2, 0, 30000);
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ ASSERT_GT(codec_settings.width, 4);
+ // Configure encoder with 3 spatial layers.
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ using Frame = EncodedVideoFrameProducer::EncodedFrame;
+ std::vector<Frame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
+ EXPECT_THAT(
+ encoded_frames,
+ ElementsAre(
+ Field(&Frame::encoded_image,
+ AllOf(Field(&EncodedImage::_encodedWidth,
+ codec_settings.width / 4),
+ Field(&EncodedImage::_encodedHeight,
+ codec_settings.height / 4))),
+ Field(&Frame::encoded_image,
+ AllOf(Field(&EncodedImage::_encodedWidth,
+ codec_settings.width / 2),
+ Field(&EncodedImage::_encodedHeight,
+ codec_settings.height / 2))),
+ Field(&Frame::encoded_image,
+ AllOf(Field(&EncodedImage::_encodedWidth, codec_settings.width),
+ Field(&EncodedImage::_encodedHeight,
+ codec_settings.height)))));
+}
+
+TEST(LibaomAv1EncoderTest, RtpTimestampWrap) {
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoEncoder::RateControlParameters rate_parameters;
+ rate_parameters.framerate_fps = 30;
+ rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
+ encoder->SetRates(rate_parameters);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(2)
+ .SetRtpTimestamp(std::numeric_limits<uint32_t>::max())
+ .Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(2));
+ EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameKey));
+ EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
new file mode 100644
index 0000000000..86e317f94b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <ostream>
+#include <tuple>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/av1/dav1d_decoder.h"
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ContainerEq;
+using ::testing::Each;
+using ::testing::ElementsAreArray;
+using ::testing::Ge;
+using ::testing::IsEmpty;
+using ::testing::Not;
+using ::testing::NotNull;
+using ::testing::Optional;
+using ::testing::Pointwise;
+using ::testing::SizeIs;
+using ::testing::Truly;
+using ::testing::Values;
+
+// Use small resolution for this test to make it faster.
+constexpr int kWidth = 320;
+constexpr int kHeight = 180;
+constexpr int kFramerate = 30;
+
+VideoCodec DefaultCodecSettings() {
+ VideoCodec codec_settings;
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ codec_settings.width = kWidth;
+ codec_settings.height = kHeight;
+ codec_settings.maxFramerate = kFramerate;
+ codec_settings.maxBitrate = 1000;
+ codec_settings.qpMax = 63;
+ return codec_settings;
+}
+VideoEncoder::Settings DefaultEncoderSettings() {
+ return VideoEncoder::Settings(
+ VideoEncoder::Capabilities(/*loss_notification=*/false),
+ /*number_of_cores=*/1, /*max_payload_size=*/1200);
+}
+
+class TestAv1Decoder {
+ public:
+ explicit TestAv1Decoder(int decoder_id)
+ : decoder_id_(decoder_id), decoder_(CreateDav1dDecoder()) {
+ if (decoder_ == nullptr) {
+ ADD_FAILURE() << "Failed to create a decoder#" << decoder_id_;
+ return;
+ }
+ EXPECT_TRUE(decoder_->Configure({}));
+ EXPECT_EQ(decoder_->RegisterDecodeCompleteCallback(&callback_),
+ WEBRTC_VIDEO_CODEC_OK);
+ }
+  // This class requires pointer stability and thus is neither copyable nor
+  // movable.
+ TestAv1Decoder(const TestAv1Decoder&) = delete;
+ TestAv1Decoder& operator=(const TestAv1Decoder&) = delete;
+
+ void Decode(int64_t frame_id, const EncodedImage& image) {
+ ASSERT_THAT(decoder_, NotNull());
+ int32_t error = decoder_->Decode(image, /*missing_frames=*/false,
+ /*render_time_ms=*/image.capture_time_ms_);
+ if (error != WEBRTC_VIDEO_CODEC_OK) {
+ ADD_FAILURE() << "Failed to decode frame id " << frame_id
+ << " with error code " << error << " by decoder#"
+ << decoder_id_;
+ return;
+ }
+ decoded_ids_.push_back(frame_id);
+ }
+
+ const std::vector<int64_t>& decoded_frame_ids() const { return decoded_ids_; }
+ size_t num_output_frames() const { return callback_.num_called(); }
+
+ private:
+ // Decoder callback that only counts how many times it was called.
+  // While it is tempting to replace it with a simple mock, a mock requires
+  // the expected number of calls to be set in advance. The tests below do not
+  // know the expected number of calls until after the calls are done.
+ class DecoderCallback : public DecodedImageCallback {
+ public:
+ size_t num_called() const { return num_called_; }
+
+ private:
+ int32_t Decoded(VideoFrame& /*decoded_image*/) override {
+ ++num_called_;
+ return 0;
+ }
+ void Decoded(VideoFrame& /*decoded_image*/,
+ absl::optional<int32_t> /*decode_time_ms*/,
+ absl::optional<uint8_t> /*qp*/) override {
+ ++num_called_;
+ }
+
+ int num_called_ = 0;
+ };
+
+ const int decoder_id_;
+ std::vector<int64_t> decoded_ids_;
+ DecoderCallback callback_;
+ const std::unique_ptr<VideoDecoder> decoder_;
+};
+
+TEST(LibaomAv1Test, EncodeDecode) {
+ TestAv1Decoder decoder(0);
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 300000);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder).SetNumInputFrames(4).Encode();
+ for (size_t frame_id = 0; frame_id < encoded_frames.size(); ++frame_id) {
+ decoder.Decode(static_cast<int64_t>(frame_id),
+ encoded_frames[frame_id].encoded_image);
+ }
+
+ // Check encoder produced some frames for decoder to decode.
+ ASSERT_THAT(encoded_frames, Not(IsEmpty()));
+ // Check decoder found all of them valid.
+ EXPECT_THAT(decoder.decoded_frame_ids(), SizeIs(encoded_frames.size()));
+ // Check each of them produced an output frame.
+ EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size());
+}
+
+struct LayerId {
+ friend bool operator==(const LayerId& lhs, const LayerId& rhs) {
+ return std::tie(lhs.spatial_id, lhs.temporal_id) ==
+ std::tie(rhs.spatial_id, rhs.temporal_id);
+ }
+ friend bool operator<(const LayerId& lhs, const LayerId& rhs) {
+ return std::tie(lhs.spatial_id, lhs.temporal_id) <
+ std::tie(rhs.spatial_id, rhs.temporal_id);
+ }
+ friend std::ostream& operator<<(std::ostream& s, const LayerId& layer) {
+ return s << "S" << layer.spatial_id << "T" << layer.temporal_id;
+ }
+
+ int spatial_id = 0;
+ int temporal_id = 0;
+};
+
+struct SvcTestParam {
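+  // `name` is a scalability mode string understood by
+  // ScalabilityModeFromString; for example "L2T2_KEY" parses to
+  // ScalabilityMode::kL2T2_KEY.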
+ ScalabilityMode GetScalabilityMode() const {
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(name);
+ RTC_CHECK(scalability_mode.has_value());
+ return *scalability_mode;
+ }
+
+ std::string name;
+ int num_frames_to_generate;
+ std::map<LayerId, DataRate> configured_bitrates;
+};
+
+class LibaomAv1SvcTest : public ::testing::TestWithParam<SvcTestParam> {};
+
+TEST_P(LibaomAv1SvcTest, EncodeAndDecodeAllDecodeTargets) {
+ const SvcTestParam param = GetParam();
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(param.GetScalabilityMode());
+ ASSERT_TRUE(svc_controller);
+ VideoBitrateAllocation allocation;
+ if (param.configured_bitrates.empty()) {
+ ScalableVideoController::StreamLayersConfig config =
+ svc_controller->StreamConfig();
+ for (int sid = 0; sid < config.num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < config.num_temporal_layers; ++tid) {
+ allocation.SetBitrate(sid, tid, 100'000);
+ }
+ }
+ } else {
+ for (const auto& kv : param.configured_bitrates) {
+ allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id,
+ kv.second.bps());
+ }
+ }
+
+ size_t num_decode_targets =
+ svc_controller->DependencyStructure().num_decode_targets;
+
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(GetParam().GetScalabilityMode());
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(GetParam().num_frames_to_generate)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(
+ encoded_frames,
+ Each(Truly([&](const EncodedVideoFrameProducer::EncodedFrame& frame) {
+ return frame.codec_specific_info.generic_frame_info &&
+ frame.codec_specific_info.generic_frame_info
+ ->decode_target_indications.size() == num_decode_targets;
+ })));
+
+ for (size_t dt = 0; dt < num_decode_targets; ++dt) {
+ TestAv1Decoder decoder(dt);
+ std::vector<int64_t> requested_ids;
+ for (int64_t frame_id = 0;
+ frame_id < static_cast<int64_t>(encoded_frames.size()); ++frame_id) {
+ const EncodedVideoFrameProducer::EncodedFrame& frame =
+ encoded_frames[frame_id];
+ if (frame.codec_specific_info.generic_frame_info
+ ->decode_target_indications[dt] !=
+ DecodeTargetIndication::kNotPresent) {
+ requested_ids.push_back(frame_id);
+ decoder.Decode(frame_id, frame.encoded_image);
+ }
+ EXPECT_THAT(frame.codec_specific_info.scalability_mode,
+ Optional(param.GetScalabilityMode()));
+ }
+
+ ASSERT_THAT(requested_ids, SizeIs(Ge(2u)));
+ // Check decoder found all of them valid.
+ EXPECT_THAT(decoder.decoded_frame_ids(), ContainerEq(requested_ids))
+ << "Decoder#" << dt;
+ // Check each of them produced an output frame.
+ EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size())
+ << "Decoder#" << dt;
+ }
+}
+
+MATCHER(SameLayerIdAndBitrateIsNear, "") {
+ // First check if layer id is the same.
+ return std::get<0>(arg).first == std::get<1>(arg).first &&
+ // check measured bitrate is not much lower than requested.
+ std::get<0>(arg).second >= std::get<1>(arg).second * 0.8 &&
+ // check measured bitrate is not much larger than requested.
+ std::get<0>(arg).second <= std::get<1>(arg).second * 1.1;
+}
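+
+// For example, a layer requested at 100 kbps passes the matcher above when the
+// measured rate is within [80, 110] kbps.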
+
+TEST_P(LibaomAv1SvcTest, SetRatesMatchMeasuredBitrate) {
+ const SvcTestParam param = GetParam();
+ if (param.configured_bitrates.empty()) {
+    // Rates are not configured for this particular structure; skip the test.
+ return;
+ }
+ constexpr TimeDelta kDuration = TimeDelta::Seconds(5);
+
+ VideoBitrateAllocation allocation;
+ for (const auto& kv : param.configured_bitrates) {
+ allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id,
+ kv.second.bps());
+ }
+
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ ASSERT_TRUE(encoder);
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetScalabilityMode(param.GetScalabilityMode());
+ codec_settings.maxBitrate = allocation.get_sum_kbps();
+ codec_settings.maxFramerate = 30;
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(codec_settings.maxFramerate * kDuration.seconds())
+ .SetResolution({codec_settings.width, codec_settings.height})
+ .SetFramerateFps(codec_settings.maxFramerate)
+ .Encode();
+
+ // Calculate size of each layer.
+ std::map<LayerId, DataSize> layer_size;
+ for (const auto& frame : encoded_frames) {
+ ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
+ const auto& layer = *frame.codec_specific_info.generic_frame_info;
+ LayerId layer_id = {layer.spatial_id, layer.temporal_id};
+    // This is almost the same as
+    // layer_size[layer_id] += DataSize::Bytes(frame.encoded_image.size());
+    // but avoids calling the deleted default constructor of DataSize.
+ layer_size.emplace(layer_id, DataSize::Zero()).first->second +=
+ DataSize::Bytes(frame.encoded_image.size());
+ }
+ // Convert size of the layer into bitrate of that layer.
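+  // For example, a layer that produced 62500 bytes over the 5 second run
+  // measures as 500000 bits / 5 s = 100 kbps.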
+ std::vector<std::pair<LayerId, DataRate>> measured_bitrates;
+ for (const auto& kv : layer_size) {
+ measured_bitrates.emplace_back(kv.first, kv.second / kDuration);
+ }
+ EXPECT_THAT(measured_bitrates, Pointwise(SameLayerIdAndBitrateIsNear(),
+ param.configured_bitrates));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ Svc,
+ LibaomAv1SvcTest,
+ Values(SvcTestParam{"L1T1", /*num_frames_to_generate=*/4},
+ SvcTestParam{"L1T2",
+ /*num_frames_to_generate=*/4,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(60)},
+ {{0, 1}, DataRate::KilobitsPerSec(40)}}},
+ SvcTestParam{"L1T3", /*num_frames_to_generate=*/8},
+ SvcTestParam{"L2T1",
+ /*num_frames_to_generate=*/3,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(30)},
+ {{1, 0}, DataRate::KilobitsPerSec(70)}}},
+ SvcTestParam{"L2T1h",
+ /*num_frames_to_generate=*/3,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(30)},
+ {{1, 0}, DataRate::KilobitsPerSec(70)}}},
+ SvcTestParam{"L2T1_KEY", /*num_frames_to_generate=*/3},
+ SvcTestParam{"L3T1", /*num_frames_to_generate=*/3},
+ SvcTestParam{"L3T3", /*num_frames_to_generate=*/8},
+ SvcTestParam{"S2T1", /*num_frames_to_generate=*/3},
+ SvcTestParam{"S3T3", /*num_frames_to_generate=*/8},
+ SvcTestParam{"L2T2", /*num_frames_to_generate=*/4},
+ SvcTestParam{"L2T2_KEY", /*num_frames_to_generate=*/4},
+ SvcTestParam{"L2T2_KEY_SHIFT",
+ /*num_frames_to_generate=*/4,
+ /*configured_bitrates=*/
+ {{{0, 0}, DataRate::KilobitsPerSec(70)},
+ {{0, 1}, DataRate::KilobitsPerSec(30)},
+ {{1, 0}, DataRate::KilobitsPerSec(110)},
+ {{1, 1}, DataRate::KilobitsPerSec(80)}}}),
+ [](const testing::TestParamInfo<SvcTestParam>& info) {
+ return info.param.name;
+ });
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS b/third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS
new file mode 100644
index 0000000000..4e110917d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "+third_party/ffmpeg",
+ "+third_party/openh264",
+ "+media/base",
+]
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS b/third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS
new file mode 100644
index 0000000000..4b06c4e32b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/OWNERS
@@ -0,0 +1,2 @@
+sprang@webrtc.org
+ssilkin@webrtc.org
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc
new file mode 100644
index 0000000000..23580d7a4a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "modules/video_coding/codecs/h264/include/h264.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "rtc_base/trace_event.h"
+
+#if defined(WEBRTC_USE_H264)
+#include "modules/video_coding/codecs/h264/h264_decoder_impl.h"
+#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
+#endif
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+#if defined(WEBRTC_USE_H264)
+bool g_rtc_use_h264 = true;
+#endif
+
+// Whether the H.264 codec (OpenH264/FFmpeg) is supported.
+bool IsH264CodecSupported() {
+#if defined(WEBRTC_USE_H264)
+ return g_rtc_use_h264;
+#else
+ return false;
+#endif
+}
+
+constexpr ScalabilityMode kSupportedScalabilityModes[] = {
+ ScalabilityMode::kL1T1, ScalabilityMode::kL1T2, ScalabilityMode::kL1T3};
+
+} // namespace
+
+SdpVideoFormat CreateH264Format(H264Profile profile,
+ H264Level level,
+ const std::string& packetization_mode,
+ bool add_scalability_modes) {
+ const absl::optional<std::string> profile_string =
+ H264ProfileLevelIdToString(H264ProfileLevelId(profile, level));
+ RTC_CHECK(profile_string);
+ absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
+ if (add_scalability_modes) {
+ for (const auto scalability_mode : kSupportedScalabilityModes) {
+ scalability_modes.push_back(scalability_mode);
+ }
+ }
+ return SdpVideoFormat(
+ cricket::kH264CodecName,
+ {{cricket::kH264FmtpProfileLevelId, *profile_string},
+ {cricket::kH264FmtpLevelAsymmetryAllowed, "1"},
+ {cricket::kH264FmtpPacketizationMode, packetization_mode}},
+ scalability_modes);
+}
+
+void DisableRtcUseH264() {
+#if defined(WEBRTC_USE_H264)
+ g_rtc_use_h264 = false;
+#endif
+}
+
+std::vector<SdpVideoFormat> SupportedH264Codecs(bool add_scalability_modes) {
+ TRACE_EVENT0("webrtc", __func__);
+ if (!IsH264CodecSupported())
+ return std::vector<SdpVideoFormat>();
+ // We only support encoding Constrained Baseline Profile (CBP), but the
+ // decoder supports more profiles. We can list all profiles here that are
+ // supported by the decoder and that are also supersets of CBP, i.e. the
+ // decoder for that profile is required to be able to decode CBP. This means
+ // we can encode and send CBP even though we negotiated a potentially
+ // higher profile. See the H264 spec for more information.
+ //
+ // We support both packetization modes 0 (mandatory) and 1 (optional,
+ // preferred).
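+  //
+  // For example, the first entry below corresponds to an SDP fmtp line of
+  // roughly "profile-level-id=42001f;level-asymmetry-allowed=1;
+  // packetization-mode=1" (Baseline, level 3.1).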
+ return {CreateH264Format(H264Profile::kProfileBaseline, H264Level::kLevel3_1,
+ "1", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileBaseline, H264Level::kLevel3_1,
+ "0", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileConstrainedBaseline,
+ H264Level::kLevel3_1, "1", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileConstrainedBaseline,
+ H264Level::kLevel3_1, "0", add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileMain, H264Level::kLevel3_1, "1",
+ add_scalability_modes),
+ CreateH264Format(H264Profile::kProfileMain, H264Level::kLevel3_1, "0",
+ add_scalability_modes)};
+}
+
+std::vector<SdpVideoFormat> SupportedH264DecoderCodecs() {
+ TRACE_EVENT0("webrtc", __func__);
+ if (!IsH264CodecSupported())
+ return std::vector<SdpVideoFormat>();
+
+ std::vector<SdpVideoFormat> supportedCodecs = SupportedH264Codecs();
+
+ // OpenH264 doesn't yet support High Predictive 4:4:4 encoding but it does
+ // support decoding.
+ supportedCodecs.push_back(CreateH264Format(
+ H264Profile::kProfilePredictiveHigh444, H264Level::kLevel3_1, "1"));
+ supportedCodecs.push_back(CreateH264Format(
+ H264Profile::kProfilePredictiveHigh444, H264Level::kLevel3_1, "0"));
+
+ return supportedCodecs;
+}
+
+std::unique_ptr<H264Encoder> H264Encoder::Create(
+ const cricket::VideoCodec& codec) {
+ RTC_DCHECK(H264Encoder::IsSupported());
+#if defined(WEBRTC_USE_H264)
+ RTC_CHECK(g_rtc_use_h264);
+ RTC_LOG(LS_INFO) << "Creating H264EncoderImpl.";
+ return std::make_unique<H264EncoderImpl>(codec);
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Encoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+bool H264Encoder::SupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ for (const auto& entry : kSupportedScalabilityModes) {
+ if (entry == scalability_mode) {
+ return true;
+ }
+ }
+ return false;
+}
+
+std::unique_ptr<H264Decoder> H264Decoder::Create() {
+ RTC_DCHECK(H264Decoder::IsSupported());
+#if defined(WEBRTC_USE_H264)
+ RTC_CHECK(g_rtc_use_h264);
+ RTC_LOG(LS_INFO) << "Creating H264DecoderImpl.";
+ return std::make_unique<H264DecoderImpl>();
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Decoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc
new file mode 100644
index 0000000000..59921263e3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Everything declared/defined in this file is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#include "modules/video_coding/codecs/h264/h264_color_space.h"
+
+namespace webrtc {
+
+ColorSpace ExtractH264ColorSpace(AVCodecContext* codec) {
+ ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified;
+ switch (codec->color_primaries) {
+ case AVCOL_PRI_BT709:
+ primaries = ColorSpace::PrimaryID::kBT709;
+ break;
+ case AVCOL_PRI_BT470M:
+ primaries = ColorSpace::PrimaryID::kBT470M;
+ break;
+ case AVCOL_PRI_BT470BG:
+ primaries = ColorSpace::PrimaryID::kBT470BG;
+ break;
+ case AVCOL_PRI_SMPTE170M:
+ primaries = ColorSpace::PrimaryID::kSMPTE170M;
+ break;
+ case AVCOL_PRI_SMPTE240M:
+ primaries = ColorSpace::PrimaryID::kSMPTE240M;
+ break;
+ case AVCOL_PRI_FILM:
+ primaries = ColorSpace::PrimaryID::kFILM;
+ break;
+ case AVCOL_PRI_BT2020:
+ primaries = ColorSpace::PrimaryID::kBT2020;
+ break;
+ case AVCOL_PRI_SMPTE428:
+ primaries = ColorSpace::PrimaryID::kSMPTEST428;
+ break;
+ case AVCOL_PRI_SMPTE431:
+ primaries = ColorSpace::PrimaryID::kSMPTEST431;
+ break;
+ case AVCOL_PRI_SMPTE432:
+ primaries = ColorSpace::PrimaryID::kSMPTEST432;
+ break;
+ case AVCOL_PRI_JEDEC_P22:
+ primaries = ColorSpace::PrimaryID::kJEDECP22;
+ break;
+ case AVCOL_PRI_RESERVED0:
+ case AVCOL_PRI_UNSPECIFIED:
+ case AVCOL_PRI_RESERVED:
+ default:
+ break;
+ }
+
+ ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified;
+ switch (codec->color_trc) {
+ case AVCOL_TRC_BT709:
+ transfer = ColorSpace::TransferID::kBT709;
+ break;
+ case AVCOL_TRC_GAMMA22:
+ transfer = ColorSpace::TransferID::kGAMMA22;
+ break;
+ case AVCOL_TRC_GAMMA28:
+ transfer = ColorSpace::TransferID::kGAMMA28;
+ break;
+ case AVCOL_TRC_SMPTE170M:
+ transfer = ColorSpace::TransferID::kSMPTE170M;
+ break;
+ case AVCOL_TRC_SMPTE240M:
+ transfer = ColorSpace::TransferID::kSMPTE240M;
+ break;
+ case AVCOL_TRC_LINEAR:
+ transfer = ColorSpace::TransferID::kLINEAR;
+ break;
+ case AVCOL_TRC_LOG:
+ transfer = ColorSpace::TransferID::kLOG;
+ break;
+ case AVCOL_TRC_LOG_SQRT:
+ transfer = ColorSpace::TransferID::kLOG_SQRT;
+ break;
+ case AVCOL_TRC_IEC61966_2_4:
+ transfer = ColorSpace::TransferID::kIEC61966_2_4;
+ break;
+ case AVCOL_TRC_BT1361_ECG:
+ transfer = ColorSpace::TransferID::kBT1361_ECG;
+ break;
+ case AVCOL_TRC_IEC61966_2_1:
+ transfer = ColorSpace::TransferID::kIEC61966_2_1;
+ break;
+ case AVCOL_TRC_BT2020_10:
+ transfer = ColorSpace::TransferID::kBT2020_10;
+ break;
+ case AVCOL_TRC_BT2020_12:
+ transfer = ColorSpace::TransferID::kBT2020_12;
+ break;
+ case AVCOL_TRC_SMPTE2084:
+ transfer = ColorSpace::TransferID::kSMPTEST2084;
+ break;
+ case AVCOL_TRC_SMPTE428:
+ transfer = ColorSpace::TransferID::kSMPTEST428;
+ break;
+ case AVCOL_TRC_ARIB_STD_B67:
+ transfer = ColorSpace::TransferID::kARIB_STD_B67;
+ break;
+ case AVCOL_TRC_RESERVED0:
+ case AVCOL_TRC_UNSPECIFIED:
+ case AVCOL_TRC_RESERVED:
+ default:
+ break;
+ }
+
+ ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified;
+ switch (codec->colorspace) {
+ case AVCOL_SPC_RGB:
+ matrix = ColorSpace::MatrixID::kRGB;
+ break;
+ case AVCOL_SPC_BT709:
+ matrix = ColorSpace::MatrixID::kBT709;
+ break;
+ case AVCOL_SPC_FCC:
+ matrix = ColorSpace::MatrixID::kFCC;
+ break;
+ case AVCOL_SPC_BT470BG:
+ matrix = ColorSpace::MatrixID::kBT470BG;
+ break;
+ case AVCOL_SPC_SMPTE170M:
+ matrix = ColorSpace::MatrixID::kSMPTE170M;
+ break;
+ case AVCOL_SPC_SMPTE240M:
+ matrix = ColorSpace::MatrixID::kSMPTE240M;
+ break;
+ case AVCOL_SPC_YCGCO:
+ matrix = ColorSpace::MatrixID::kYCOCG;
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ matrix = ColorSpace::MatrixID::kBT2020_NCL;
+ break;
+ case AVCOL_SPC_BT2020_CL:
+ matrix = ColorSpace::MatrixID::kBT2020_CL;
+ break;
+ case AVCOL_SPC_SMPTE2085:
+ matrix = ColorSpace::MatrixID::kSMPTE2085;
+ break;
+ case AVCOL_SPC_CHROMA_DERIVED_NCL:
+ case AVCOL_SPC_CHROMA_DERIVED_CL:
+ case AVCOL_SPC_ICTCP:
+ case AVCOL_SPC_UNSPECIFIED:
+ case AVCOL_SPC_RESERVED:
+ default:
+ break;
+ }
+
+ ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid;
+ switch (codec->color_range) {
+ case AVCOL_RANGE_MPEG:
+ range = ColorSpace::RangeID::kLimited;
+ break;
+ case AVCOL_RANGE_JPEG:
+ range = ColorSpace::RangeID::kFull;
+ break;
+ case AVCOL_RANGE_UNSPECIFIED:
+ default:
+ break;
+ }
+ return ColorSpace(primaries, transfer, matrix, range);
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h
new file mode 100644
index 0000000000..392ccaf563
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_color_space.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_H264_COLOR_SPACE_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_H264_COLOR_SPACE_H_
+
+// Everything declared in this header is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#if defined(WEBRTC_WIN) && !defined(__clang__)
+#error "See: bugs.webrtc.org/9213#c13."
+#endif
+
+#include "api/video/color_space.h"
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+} // extern "C"
+
+namespace webrtc {
+
+// Helper function for extracting color space information from an H264 stream.
+ColorSpace ExtractH264ColorSpace(AVCodecContext* codec);
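+//
+// A minimal usage sketch (assuming `av_context` is a configured decoder
+// context):
+//   ColorSpace cs = ExtractH264ColorSpace(av_context);
+//   // e.g. cs.primaries() == ColorSpace::PrimaryID::kBT709 for BT.709 video.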
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_H264_COLOR_SPACE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
new file mode 100644
index 0000000000..f67718cb23
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+// Everything declared/defined in this file is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#include "modules/video_coding/codecs/h264/h264_decoder_impl.h"
+
+#include <algorithm>
+#include <array>
+#include <limits>
+#include <memory>
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+#include "third_party/ffmpeg/libavformat/avformat.h"
+#include "third_party/ffmpeg/libavutil/imgutils.h"
+} // extern "C"
+
+#include "api/video/color_space.h"
+#include "api/video/i010_buffer.h"
+#include "api/video/i420_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "modules/video_coding/codecs/h264/h264_color_space.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr std::array<AVPixelFormat, 9> kPixelFormatsSupported = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV444P10LE};
+const size_t kYPlaneIndex = 0;
+const size_t kUPlaneIndex = 1;
+const size_t kVPlaneIndex = 2;
+
+// Used by histograms. Values of entries should not be changed.
+enum H264DecoderImplEvent {
+ kH264DecoderEventInit = 0,
+ kH264DecoderEventError = 1,
+ kH264DecoderEventMax = 16,
+};
+
+struct ScopedPtrAVFreePacket {
+ void operator()(AVPacket* packet) { av_packet_free(&packet); }
+};
+typedef std::unique_ptr<AVPacket, ScopedPtrAVFreePacket> ScopedAVPacket;
+
+ScopedAVPacket MakeScopedAVPacket() {
+ ScopedAVPacket packet(av_packet_alloc());
+ return packet;
+}
+
+} // namespace
+
+int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
+ AVFrame* av_frame,
+ int flags) {
+ // Set in `Configure`.
+ H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
+ // DCHECK values set in `Configure`.
+ RTC_DCHECK(decoder);
+ // Necessary capability to be allowed to provide our own buffers.
+  RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);
+
+ auto pixelFormatSupported = std::find_if(
+ kPixelFormatsSupported.begin(), kPixelFormatsSupported.end(),
+ [context](AVPixelFormat format) { return context->pix_fmt == format; });
+
+ RTC_CHECK(pixelFormatSupported != kPixelFormatsSupported.end());
+
+ // `av_frame->width` and `av_frame->height` are set by FFmpeg. These are the
+ // actual image's dimensions and may be different from `context->width` and
+ // `context->coded_width` due to reordering.
+ int width = av_frame->width;
+ int height = av_frame->height;
+  // See `lowres`; if used, the decoder scales the image by 1/2^(lowres). This
+ // has implications on which resolutions are valid, but we don't use it.
+ RTC_CHECK_EQ(context->lowres, 0);
+ // Adjust the `width` and `height` to values acceptable by the decoder.
+ // Without this, FFmpeg may overflow the buffer. If modified, `width` and/or
+ // `height` are larger than the actual image and the image has to be cropped
+ // (top-left corner) after decoding to avoid visible borders to the right and
+ // bottom of the actual image.
+ avcodec_align_dimensions(context, &width, &height);
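+  // For example (a typical case): a 1920x1080 stream is coded in 16x16
+  // macroblocks, so `height` is aligned up to 1088 here while
+  // `av_frame->height` stays 1080; the extra rows are cropped after decoding.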
+
+ RTC_CHECK_GE(width, 0);
+ RTC_CHECK_GE(height, 0);
+ int ret = av_image_check_size(static_cast<unsigned int>(width),
+ static_cast<unsigned int>(height), 0, nullptr);
+ if (ret < 0) {
+ RTC_LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
+ decoder->ReportError();
+ return ret;
+ }
+
+ // The video frame is stored in `frame_buffer`. `av_frame` is FFmpeg's version
+ // of a video frame and will be set up to reference `frame_buffer`'s data.
+
+ // FFmpeg expects the initial allocation to be zero-initialized according to
+ // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
+ // TODO(https://crbug.com/390941): Delete that feature from the video pool,
+  // and instead add an explicit call to InitializeData here.
+ rtc::scoped_refptr<PlanarYuvBuffer> frame_buffer;
+ rtc::scoped_refptr<I444Buffer> i444_buffer;
+ rtc::scoped_refptr<I420Buffer> i420_buffer;
+ rtc::scoped_refptr<I422Buffer> i422_buffer;
+ rtc::scoped_refptr<I010Buffer> i010_buffer;
+ rtc::scoped_refptr<I210Buffer> i210_buffer;
+ rtc::scoped_refptr<I410Buffer> i410_buffer;
+ int bytes_per_pixel = 1;
+ switch (context->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUVJ420P:
+ i420_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI420Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] = i420_buffer->MutableDataY();
+ av_frame->linesize[kYPlaneIndex] = i420_buffer->StrideY();
+ av_frame->data[kUPlaneIndex] = i420_buffer->MutableDataU();
+ av_frame->linesize[kUPlaneIndex] = i420_buffer->StrideU();
+ av_frame->data[kVPlaneIndex] = i420_buffer->MutableDataV();
+ av_frame->linesize[kVPlaneIndex] = i420_buffer->StrideV();
+ RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
+ frame_buffer = i420_buffer;
+ break;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUVJ444P:
+ i444_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI444Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] = i444_buffer->MutableDataY();
+ av_frame->linesize[kYPlaneIndex] = i444_buffer->StrideY();
+ av_frame->data[kUPlaneIndex] = i444_buffer->MutableDataU();
+ av_frame->linesize[kUPlaneIndex] = i444_buffer->StrideU();
+ av_frame->data[kVPlaneIndex] = i444_buffer->MutableDataV();
+ av_frame->linesize[kVPlaneIndex] = i444_buffer->StrideV();
+ frame_buffer = i444_buffer;
+ break;
+ case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUVJ422P:
+ i422_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI422Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] = i422_buffer->MutableDataY();
+ av_frame->linesize[kYPlaneIndex] = i422_buffer->StrideY();
+ av_frame->data[kUPlaneIndex] = i422_buffer->MutableDataU();
+ av_frame->linesize[kUPlaneIndex] = i422_buffer->StrideU();
+ av_frame->data[kVPlaneIndex] = i422_buffer->MutableDataV();
+ av_frame->linesize[kVPlaneIndex] = i422_buffer->StrideV();
+ frame_buffer = i422_buffer;
+ break;
+ case AV_PIX_FMT_YUV420P10LE:
+ i010_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI010Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i010_buffer->MutableDataY());
+ av_frame->linesize[kYPlaneIndex] = i010_buffer->StrideY() * 2;
+ av_frame->data[kUPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i010_buffer->MutableDataU());
+ av_frame->linesize[kUPlaneIndex] = i010_buffer->StrideU() * 2;
+ av_frame->data[kVPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i010_buffer->MutableDataV());
+ av_frame->linesize[kVPlaneIndex] = i010_buffer->StrideV() * 2;
+ frame_buffer = i010_buffer;
+ bytes_per_pixel = 2;
+ break;
+ case AV_PIX_FMT_YUV422P10LE:
+ i210_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI210Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i210_buffer->MutableDataY());
+ av_frame->linesize[kYPlaneIndex] = i210_buffer->StrideY() * 2;
+ av_frame->data[kUPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i210_buffer->MutableDataU());
+ av_frame->linesize[kUPlaneIndex] = i210_buffer->StrideU() * 2;
+ av_frame->data[kVPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i210_buffer->MutableDataV());
+ av_frame->linesize[kVPlaneIndex] = i210_buffer->StrideV() * 2;
+ frame_buffer = i210_buffer;
+ bytes_per_pixel = 2;
+ break;
+ case AV_PIX_FMT_YUV444P10LE:
+ i410_buffer =
+ decoder->ffmpeg_buffer_pool_.CreateI410Buffer(width, height);
+ // Set `av_frame` members as required by FFmpeg.
+ av_frame->data[kYPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i410_buffer->MutableDataY());
+ av_frame->linesize[kYPlaneIndex] = i410_buffer->StrideY() * 2;
+ av_frame->data[kUPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i410_buffer->MutableDataU());
+ av_frame->linesize[kUPlaneIndex] = i410_buffer->StrideU() * 2;
+ av_frame->data[kVPlaneIndex] =
+ reinterpret_cast<uint8_t*>(i410_buffer->MutableDataV());
+ av_frame->linesize[kVPlaneIndex] = i410_buffer->StrideV() * 2;
+ frame_buffer = i410_buffer;
+ bytes_per_pixel = 2;
+ break;
+ default:
+      RTC_LOG(LS_ERROR) << "Unsupported buffer type " << context->pix_fmt
+                        << ". Check the supported pixel formats!";
+ decoder->ReportError();
+ return -1;
+ }
+
+ int y_size = width * height * bytes_per_pixel;
+ int uv_size = frame_buffer->ChromaWidth() * frame_buffer->ChromaHeight() *
+ bytes_per_pixel;
+  // DCHECK that we have a contiguous buffer, as is required.
+ RTC_DCHECK_EQ(av_frame->data[kUPlaneIndex],
+ av_frame->data[kYPlaneIndex] + y_size);
+ RTC_DCHECK_EQ(av_frame->data[kVPlaneIndex],
+ av_frame->data[kUPlaneIndex] + uv_size);
+ int total_size = y_size + 2 * uv_size;
+
+ av_frame->format = context->pix_fmt;
+ av_frame->reordered_opaque = context->reordered_opaque;
+
+ // Create a VideoFrame object, to keep a reference to the buffer.
+ // TODO(nisse): The VideoFrame's timestamp and rotation info is not used.
+  // Refactor so that a VideoFrame object is not used at all.
+ av_frame->buf[0] = av_buffer_create(
+ av_frame->data[kYPlaneIndex], total_size, AVFreeBuffer2,
+ static_cast<void*>(
+ std::make_unique<VideoFrame>(VideoFrame::Builder()
+ .set_video_frame_buffer(frame_buffer)
+ .set_rotation(kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build())
+ .release()),
+ 0);
+ RTC_CHECK(av_frame->buf[0]);
+ return 0;
+}
+
+void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
+ // The buffer pool recycles the buffer used by `video_frame` when there are no
+ // more references to it. `video_frame` is a thin buffer holder and is not
+ // recycled.
+ VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
+ delete video_frame;
+}
+
+H264DecoderImpl::H264DecoderImpl()
+ : ffmpeg_buffer_pool_(true),
+ decoded_image_callback_(nullptr),
+ has_reported_init_(false),
+ has_reported_error_(false) {}
+
+H264DecoderImpl::~H264DecoderImpl() {
+ Release();
+}
+
+bool H264DecoderImpl::Configure(const Settings& settings) {
+ ReportInit();
+ if (settings.codec_type() != kVideoCodecH264) {
+ ReportError();
+ return false;
+ }
+
+ // Release necessary in case of re-initializing.
+ int32_t ret = Release();
+ if (ret != WEBRTC_VIDEO_CODEC_OK) {
+ ReportError();
+ return false;
+ }
+ RTC_DCHECK(!av_context_);
+
+ // Initialize AVCodecContext.
+ av_context_.reset(avcodec_alloc_context3(nullptr));
+
+ av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
+ av_context_->codec_id = AV_CODEC_ID_H264;
+ const RenderResolution& resolution = settings.max_render_resolution();
+ if (resolution.Valid()) {
+ av_context_->coded_width = resolution.Width();
+ av_context_->coded_height = resolution.Height();
+ }
+ av_context_->extradata = nullptr;
+ av_context_->extradata_size = 0;
+
+ // If this is ever increased, look at `av_context_->thread_safe_callbacks` and
+ // make it possible to disable the thread checker in the frame buffer pool.
+ av_context_->thread_count = 1;
+ av_context_->thread_type = FF_THREAD_SLICE;
+
+ // Function used by FFmpeg to get buffers to store decoded frames in.
+ av_context_->get_buffer2 = AVGetBuffer2;
+  // `get_buffer2` is called with the context; its `opaque` field can be used
+  // to get a pointer to `this`.
+ av_context_->opaque = this;
+
+ const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
+ if (!codec) {
+ // This is an indication that FFmpeg has not been initialized or it has not
+ // been compiled/initialized with the correct set of codecs.
+ RTC_LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
+ Release();
+ ReportError();
+ return false;
+ }
+ int res = avcodec_open2(av_context_.get(), codec, nullptr);
+ if (res < 0) {
+ RTC_LOG(LS_ERROR) << "avcodec_open2 error: " << res;
+ Release();
+ ReportError();
+ return false;
+ }
+
+ av_frame_.reset(av_frame_alloc());
+
+ if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
+ if (!ffmpeg_buffer_pool_.Resize(*buffer_pool_size)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int32_t H264DecoderImpl::Release() {
+ av_context_.reset();
+ av_frame_.reset();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decoded_image_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
+ bool /*missing_frames*/,
+ int64_t /*render_time_ms*/) {
+ if (!IsInitialized()) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (!decoded_image_callback_) {
+ RTC_LOG(LS_WARNING)
+ << "Configure() has been called, but a callback function "
+ "has not been set with RegisterDecodeCompleteCallback()";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (!input_image.data() || !input_image.size()) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ ScopedAVPacket packet = MakeScopedAVPacket();
+ if (!packet) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+  // `packet->data` has a non-const type, but isn't modified by
+  // `avcodec_send_packet`.
+ packet->data = const_cast<uint8_t*>(input_image.data());
+ if (input_image.size() >
+ static_cast<size_t>(std::numeric_limits<int>::max())) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ packet->size = static_cast<int>(input_image.size());
+ int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000; // ms -> μs
+ av_context_->reordered_opaque = frame_timestamp_us;
+
+ int result = avcodec_send_packet(av_context_.get(), packet.get());
+
+ if (result < 0) {
+ RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
+ if (result < 0) {
+ RTC_LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // We don't expect reordering. Decoded frame timestamp should match
+ // the input one.
+ RTC_DCHECK_EQ(av_frame_->reordered_opaque, frame_timestamp_us);
+
+ // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
+ h264_bitstream_parser_.ParseBitstream(input_image);
+ absl::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();
+
+ // Obtain the `video_frame` containing the decoded image.
+ VideoFrame* input_frame =
+ static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
+ RTC_DCHECK(input_frame);
+ rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
+ input_frame->video_frame_buffer();
+
+  // Instantiate a planar YUV buffer according to the video frame buffer type.
+ const webrtc::PlanarYuvBuffer* planar_yuv_buffer = nullptr;
+ const webrtc::PlanarYuv8Buffer* planar_yuv8_buffer = nullptr;
+ const webrtc::PlanarYuv16BBuffer* planar_yuv16_buffer = nullptr;
+ VideoFrameBuffer::Type video_frame_buffer_type = frame_buffer->type();
+ switch (video_frame_buffer_type) {
+ case VideoFrameBuffer::Type::kI420:
+ planar_yuv_buffer = frame_buffer->GetI420();
+ planar_yuv8_buffer =
+ reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI444:
+ planar_yuv_buffer = frame_buffer->GetI444();
+ planar_yuv8_buffer =
+ reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI422:
+ planar_yuv_buffer = frame_buffer->GetI422();
+ planar_yuv8_buffer =
+ reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI010:
+ planar_yuv_buffer = frame_buffer->GetI010();
+ planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
+ planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI210:
+ planar_yuv_buffer = frame_buffer->GetI210();
+ planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
+ planar_yuv_buffer);
+ break;
+ case VideoFrameBuffer::Type::kI410:
+ planar_yuv_buffer = frame_buffer->GetI410();
+ planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
+ planar_yuv_buffer);
+ break;
+ default:
+      // If this code is changed to allow other video frame buffer types,
+      // make sure that the code below which wraps the I420/I422/I444 buffer
+      // and the code which converts to NV12 are changed to work with the new
+      // video frame buffer type.
+
+ RTC_LOG(LS_ERROR) << "frame_buffer type: "
+ << static_cast<int32_t>(video_frame_buffer_type)
+ << " is not supported!";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // When needed, FFmpeg applies cropping by moving plane pointers and adjusting
+ // frame width/height. Ensure that cropped buffers lie within the allocated
+ // memory.
+ RTC_DCHECK_LE(av_frame_->width, planar_yuv_buffer->width());
+ RTC_DCHECK_LE(av_frame_->height, planar_yuv_buffer->height());
+ switch (video_frame_buffer_type) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI444:
+ case VideoFrameBuffer::Type::kI422: {
+ RTC_DCHECK_GE(av_frame_->data[kYPlaneIndex], planar_yuv8_buffer->DataY());
+ RTC_DCHECK_LE(
+ av_frame_->data[kYPlaneIndex] +
+ av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
+ planar_yuv8_buffer->DataY() +
+ planar_yuv8_buffer->StrideY() * planar_yuv8_buffer->height());
+ RTC_DCHECK_GE(av_frame_->data[kUPlaneIndex], planar_yuv8_buffer->DataU());
+ RTC_DCHECK_LE(
+ av_frame_->data[kUPlaneIndex] +
+ av_frame_->linesize[kUPlaneIndex] *
+ planar_yuv8_buffer->ChromaHeight(),
+ planar_yuv8_buffer->DataU() + planar_yuv8_buffer->StrideU() *
+ planar_yuv8_buffer->ChromaHeight());
+ RTC_DCHECK_GE(av_frame_->data[kVPlaneIndex], planar_yuv8_buffer->DataV());
+ RTC_DCHECK_LE(
+ av_frame_->data[kVPlaneIndex] +
+ av_frame_->linesize[kVPlaneIndex] *
+ planar_yuv8_buffer->ChromaHeight(),
+ planar_yuv8_buffer->DataV() + planar_yuv8_buffer->StrideV() *
+ planar_yuv8_buffer->ChromaHeight());
+ break;
+ }
+ case VideoFrameBuffer::Type::kI010:
+ case VideoFrameBuffer::Type::kI210:
+ case VideoFrameBuffer::Type::kI410: {
+ RTC_DCHECK_GE(
+ av_frame_->data[kYPlaneIndex],
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()));
+ RTC_DCHECK_LE(
+ av_frame_->data[kYPlaneIndex] +
+ av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()) +
+ planar_yuv16_buffer->StrideY() * 2 *
+ planar_yuv16_buffer->height());
+ RTC_DCHECK_GE(
+ av_frame_->data[kUPlaneIndex],
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()));
+ RTC_DCHECK_LE(
+ av_frame_->data[kUPlaneIndex] +
+ av_frame_->linesize[kUPlaneIndex] *
+ planar_yuv16_buffer->ChromaHeight(),
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()) +
+ planar_yuv16_buffer->StrideU() * 2 *
+ planar_yuv16_buffer->ChromaHeight());
+ RTC_DCHECK_GE(
+ av_frame_->data[kVPlaneIndex],
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()));
+ RTC_DCHECK_LE(
+ av_frame_->data[kVPlaneIndex] +
+ av_frame_->linesize[kVPlaneIndex] *
+ planar_yuv16_buffer->ChromaHeight(),
+ reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()) +
+ planar_yuv16_buffer->StrideV() * 2 *
+ planar_yuv16_buffer->ChromaHeight());
+ break;
+ }
+ default:
+ RTC_LOG(LS_ERROR) << "frame_buffer type: "
+ << static_cast<int32_t>(video_frame_buffer_type)
+ << " is not supported!";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> cropped_buffer;
+ switch (video_frame_buffer_type) {
+ case VideoFrameBuffer::Type::kI420:
+ cropped_buffer = WrapI420Buffer(
+ av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
+ av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
+ av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
+ av_frame_->linesize[kVPlaneIndex],
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI444:
+ cropped_buffer = WrapI444Buffer(
+ av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
+ av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
+ av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
+ av_frame_->linesize[kVPlaneIndex],
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI422:
+ cropped_buffer = WrapI422Buffer(
+ av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
+ av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
+ av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
+ av_frame_->linesize[kVPlaneIndex],
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI010:
+ cropped_buffer = WrapI010Buffer(
+ av_frame_->width, av_frame_->height,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
+ av_frame_->linesize[kYPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
+ av_frame_->linesize[kUPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
+ av_frame_->linesize[kVPlaneIndex] / 2,
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI210:
+ cropped_buffer = WrapI210Buffer(
+ av_frame_->width, av_frame_->height,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
+ av_frame_->linesize[kYPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
+ av_frame_->linesize[kUPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
+ av_frame_->linesize[kVPlaneIndex] / 2,
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ case VideoFrameBuffer::Type::kI410:
+ cropped_buffer = WrapI410Buffer(
+ av_frame_->width, av_frame_->height,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
+ av_frame_->linesize[kYPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
+ av_frame_->linesize[kUPlaneIndex] / 2,
+ reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
+ av_frame_->linesize[kVPlaneIndex] / 2,
+ // To keep reference alive.
+ [frame_buffer] {});
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "frame_buffer type: "
+ << static_cast<int32_t>(video_frame_buffer_type)
+ << " is not supported!";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Pass on color space from input frame if explicitly specified.
+ const ColorSpace& color_space =
+ input_image.ColorSpace() ? *input_image.ColorSpace()
+ : ExtractH264ColorSpace(av_context_.get());
+
+ VideoFrame decoded_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(cropped_buffer)
+ .set_timestamp_rtp(input_image.Timestamp())
+ .set_color_space(color_space)
+ .build();
+
+ // Return decoded frame.
+ // TODO(nisse): Timestamp and rotation are all zero here. Change decoder
+ // interface to pass a VideoFrameBuffer instead of a VideoFrame?
+ decoded_image_callback_->Decoded(decoded_frame, absl::nullopt, qp);
+
+ // Stop referencing it, possibly freeing `input_frame`.
+ av_frame_unref(av_frame_.get());
+ input_frame = nullptr;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+const char* H264DecoderImpl::ImplementationName() const {
+ return "FFmpeg";
+}
+
+bool H264DecoderImpl::IsInitialized() const {
+ return av_context_ != nullptr;
+}
+
+void H264DecoderImpl::ReportInit() {
+ if (has_reported_init_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
+ kH264DecoderEventInit, kH264DecoderEventMax);
+ has_reported_init_ = true;
+}
+
+void H264DecoderImpl::ReportError() {
+ if (has_reported_error_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
+ kH264DecoderEventError, kH264DecoderEventMax);
+ has_reported_error_ = true;
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h
new file mode 100644
index 0000000000..97d091cf4b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
+
+// Everything declared in this header is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#if defined(WEBRTC_WIN) && !defined(__clang__)
+#error "See: bugs.webrtc.org/9213#c13."
+#endif
+
+#include <memory>
+
+#include "modules/video_coding/codecs/h264/include/h264.h"
+
+// CAVEAT: According to the ffmpeg docs for avcodec_send_packet, ffmpeg
+// requires a few extra padding bytes after the end of the input. In addition,
+// the docs for AV_INPUT_BUFFER_PADDING_SIZE say "If the first 23 bits of the
+// additional bytes are not 0, then damaged MPEG bitstreams could cause
+// overread and segfault."
+//
+// WebRTC doesn't ensure any such padding, and REQUIRES ffmpeg to be compiled
+// with CONFIG_SAFE_BITSTREAM_READER, which is intended to eliminate
+// out-of-bounds reads. The ffmpeg docs don't say explicitly what effect this
+// flag has on the h.264 decoder or avcodec_send_packet, though, so this in
+// some ways depends on undocumented behavior. If any problems turn up, we may
+// have to add an extra copy operation, to enforce padding before buffers are
+// passed to ffmpeg.
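+//
+// A minimal sketch of what such a copy could look like (hypothetical; this
+// implementation does not do it):
+//   rtc::Buffer padded(input_image.size() + AV_INPUT_BUFFER_PADDING_SIZE);
+//   memcpy(padded.data(), input_image.data(), input_image.size());
+//   memset(padded.data() + input_image.size(), 0,
+//          AV_INPUT_BUFFER_PADDING_SIZE);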
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+} // extern "C"
+
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+
+namespace webrtc {
+
+struct AVCodecContextDeleter {
+ void operator()(AVCodecContext* ptr) const { avcodec_free_context(&ptr); }
+};
+struct AVFrameDeleter {
+ void operator()(AVFrame* ptr) const { av_frame_free(&ptr); }
+};
+
+class H264DecoderImpl : public H264Decoder {
+ public:
+ H264DecoderImpl();
+ ~H264DecoderImpl() override;
+
+ bool Configure(const Settings& settings) override;
+ int32_t Release() override;
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+
+  // `missing_frames` and `render_time_ms` are ignored.
+ int32_t Decode(const EncodedImage& input_image,
+ bool /*missing_frames*/,
+ int64_t render_time_ms = -1) override;
+
+ const char* ImplementationName() const override;
+
+ private:
+ // Called by FFmpeg when it needs a frame buffer to store decoded frames in.
+  // The `VideoFrame`s returned by FFmpeg at `Decode` originate from here. Their
+ // buffers are reference counted and freed by FFmpeg using `AVFreeBuffer2`.
+ static int AVGetBuffer2(AVCodecContext* context,
+ AVFrame* av_frame,
+ int flags);
+ // Called by FFmpeg when it is done with a video frame, see `AVGetBuffer2`.
+ static void AVFreeBuffer2(void* opaque, uint8_t* data);
+
+ bool IsInitialized() const;
+
+ // Reports statistics with histograms.
+ void ReportInit();
+ void ReportError();
+
+  // Used by ffmpeg via `AVGetBuffer2()` to allocate frame buffers.
+ VideoFrameBufferPool ffmpeg_buffer_pool_;
+ std::unique_ptr<AVCodecContext, AVCodecContextDeleter> av_context_;
+ std::unique_ptr<AVFrame, AVFrameDeleter> av_frame_;
+
+ DecodedImageCallback* decoded_image_callback_;
+
+ bool has_reported_init_;
+ bool has_reported_error_;
+
+ webrtc::H264BitstreamParser h264_bitstream_parser_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
new file mode 100644
index 0000000000..b8055ac85f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -0,0 +1,713 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+// Everything declared/defined in this file is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "modules/video_coding/utility/simulcast_utility.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+#include "third_party/openh264/src/codec/api/wels/codec_api.h"
+#include "third_party/openh264/src/codec/api/wels/codec_app_def.h"
+#include "third_party/openh264/src/codec/api/wels/codec_def.h"
+#include "third_party/openh264/src/codec/api/wels/codec_ver.h"
+
+namespace webrtc {
+
+namespace {
+
+const bool kOpenH264EncoderDetailedLogging = false;
+
+// QP scaling thresholds.
+static const int kLowH264QpThreshold = 24;
+static const int kHighH264QpThreshold = 37;
+
+// Used by histograms. Values of entries should not be changed.
+enum H264EncoderImplEvent {
+ kH264EncoderEventInit = 0,
+ kH264EncoderEventError = 1,
+ kH264EncoderEventMax = 16,
+};
+
+int NumberOfThreads(int width, int height, int number_of_cores) {
+  // TODO(hbos): In Chromium, multiple threads do not work with the sandbox on
+  // Mac; see crbug.com/583348. Until that is investigated further, only use
+  // one thread.
+ // if (width * height >= 1920 * 1080 && number_of_cores > 8) {
+ // return 8; // 8 threads for 1080p on high perf machines.
+ // } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
+ // return 3; // 3 threads for 1080p.
+ // } else if (width * height > 640 * 480 && number_of_cores >= 3) {
+ // return 2; // 2 threads for qHD/HD.
+ // } else {
+ // return 1; // 1 thread for VGA or less.
+ // }
+  // TODO(sprang): Also check sSliceArgument.uiSliceNum in GetEncoderParams(),
+ // before enabling multithreading here.
+ return 1;
+}
+
+VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
+ switch (type) {
+ case videoFrameTypeIDR:
+ return VideoFrameType::kVideoFrameKey;
+ case videoFrameTypeSkip:
+ case videoFrameTypeI:
+ case videoFrameTypeP:
+ case videoFrameTypeIPMixed:
+ return VideoFrameType::kVideoFrameDelta;
+ case videoFrameTypeInvalid:
+ break;
+ }
+ RTC_DCHECK_NOTREACHED() << "Unexpected/invalid frame type: " << type;
+ return VideoFrameType::kEmptyFrame;
+}
+
+absl::optional<ScalabilityMode> ScalabilityModeFromTemporalLayers(
+ int num_temporal_layers) {
+ switch (num_temporal_layers) {
+ case 0:
+ break;
+ case 1:
+ return ScalabilityMode::kL1T1;
+ case 2:
+ return ScalabilityMode::kL1T2;
+ case 3:
+ return ScalabilityMode::kL1T3;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return absl::nullopt;
+}
+
+} // namespace
+
+// Helper method used by H264EncoderImpl::Encode.
+// Copies the encoded bytes from `info` to `encoded_image`. The
+// `encoded_image->_buffer` may be deleted and reallocated if a bigger buffer is
+// required.
+//
+// After OpenH264 encoding, the encoded bytes are stored in `info` spread out
+// over a number of layers and "NAL units". Each NAL unit is a fragment starting
+// with the four-byte start code {0,0,0,1}. All of this data (including the
+// start codes) is copied to the `encoded_image->_buffer`.
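+//
+// For example, a typical key frame serializes as
+//   |0 0 0 1|SPS|0 0 0 1|PPS|0 0 0 1|IDR slice|
+// where each four-byte start code opens one NAL unit.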
+static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
+ // Calculate minimum buffer size required to hold encoded data.
+ size_t required_capacity = 0;
+ size_t fragments_count = 0;
+ for (int layer = 0; layer < info->iLayerNum; ++layer) {
+ const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
+ for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
+ RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
+ // Ensure `required_capacity` will not overflow.
+ RTC_CHECK_LE(layerInfo.pNalLengthInByte[nal],
+ std::numeric_limits<size_t>::max() - required_capacity);
+ required_capacity += layerInfo.pNalLengthInByte[nal];
+ }
+ }
+ auto buffer = EncodedImageBuffer::Create(required_capacity);
+ encoded_image->SetEncodedData(buffer);
+
+ // Iterate layers and NAL units, note each NAL unit as a fragment and copy
+ // the data to `encoded_image->_buffer`.
+ const uint8_t start_code[4] = {0, 0, 0, 1};
+ size_t frag = 0;
+ encoded_image->set_size(0);
+ for (int layer = 0; layer < info->iLayerNum; ++layer) {
+ const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
+ // Iterate NAL units making up this layer, noting fragments.
+ size_t layer_len = 0;
+ for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
+ // Because the sum of all layer lengths, `required_capacity`, fits in a
+ // `size_t`, we know that any indices in-between will not overflow.
+ RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
+ layer_len += layerInfo.pNalLengthInByte[nal];
+ }
+ // Copy the entire layer's data (including start codes).
+ memcpy(buffer->data() + encoded_image->size(), layerInfo.pBsBuf, layer_len);
+ encoded_image->set_size(encoded_image->size() + layer_len);
+ }
+}
+
+H264EncoderImpl::H264EncoderImpl(const cricket::VideoCodec& codec)
+ : packetization_mode_(H264PacketizationMode::SingleNalUnit),
+ max_payload_size_(0),
+ number_of_cores_(0),
+ encoded_image_callback_(nullptr),
+ has_reported_init_(false),
+ has_reported_error_(false) {
+ RTC_CHECK(absl::EqualsIgnoreCase(codec.name, cricket::kH264CodecName));
+ std::string packetization_mode_string;
+ if (codec.GetParam(cricket::kH264FmtpPacketizationMode,
+ &packetization_mode_string) &&
+ packetization_mode_string == "1") {
+ packetization_mode_ = H264PacketizationMode::NonInterleaved;
+ }
+ downscaled_buffers_.reserve(kMaxSimulcastStreams - 1);
+ encoded_images_.reserve(kMaxSimulcastStreams);
+ encoders_.reserve(kMaxSimulcastStreams);
+ configurations_.reserve(kMaxSimulcastStreams);
+ tl0sync_limit_.reserve(kMaxSimulcastStreams);
+ svc_controllers_.reserve(kMaxSimulcastStreams);
+}
+
+H264EncoderImpl::~H264EncoderImpl() {
+ Release();
+}
+
+int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) {
+ ReportInit();
+ if (!inst || inst->codecType != kVideoCodecH264) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->maxFramerate == 0) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->width < 1 || inst->height < 1) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ int32_t release_ret = Release();
+ if (release_ret != WEBRTC_VIDEO_CODEC_OK) {
+ ReportError();
+ return release_ret;
+ }
+
+ int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
+ bool doing_simulcast = (number_of_streams > 1);
+
+ if (doing_simulcast &&
+ !SimulcastUtility::ValidSimulcastParameters(*inst, number_of_streams)) {
+ return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
+ }
+ downscaled_buffers_.resize(number_of_streams - 1);
+ encoded_images_.resize(number_of_streams);
+ encoders_.resize(number_of_streams);
+ pictures_.resize(number_of_streams);
+ svc_controllers_.resize(number_of_streams);
+ scalability_modes_.resize(number_of_streams);
+ configurations_.resize(number_of_streams);
+ tl0sync_limit_.resize(number_of_streams);
+
+ number_of_cores_ = settings.number_of_cores;
+ max_payload_size_ = settings.max_payload_size;
+ codec_ = *inst;
+
+  // The code expects the simulcastStream resolutions to be correct; make sure
+  // they are filled in even when there are no simulcast layers.
+ if (codec_.numberOfSimulcastStreams == 0) {
+ codec_.simulcastStream[0].width = codec_.width;
+ codec_.simulcastStream[0].height = codec_.height;
+ }
+
+ for (int i = 0, idx = number_of_streams - 1; i < number_of_streams;
+ ++i, --idx) {
+ ISVCEncoder* openh264_encoder;
+ // Create encoder.
+ if (WelsCreateSVCEncoder(&openh264_encoder) != 0) {
+ // Failed to create encoder.
+ RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
+ RTC_DCHECK(!openh264_encoder);
+ Release();
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(openh264_encoder);
+ if (kOpenH264EncoderDetailedLogging) {
+ int trace_level = WELS_LOG_DETAIL;
+ openh264_encoder->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
+ }
+    // Otherwise the default trace level, WELS_LOG_DEFAULT, is used.
+
+ // Store h264 encoder.
+ encoders_[i] = openh264_encoder;
+
+ // Set internal settings from codec_settings
+ configurations_[i].simulcast_idx = idx;
+ configurations_[i].sending = false;
+ configurations_[i].width = codec_.simulcastStream[idx].width;
+ configurations_[i].height = codec_.simulcastStream[idx].height;
+ configurations_[i].max_frame_rate = static_cast<float>(codec_.maxFramerate);
+ configurations_[i].frame_dropping_on = codec_.GetFrameDropEnabled();
+ configurations_[i].key_frame_interval = codec_.H264()->keyFrameInterval;
+ configurations_[i].num_temporal_layers =
+ std::max(codec_.H264()->numberOfTemporalLayers,
+ codec_.simulcastStream[idx].numberOfTemporalLayers);
+
+ // Create downscaled image buffers.
+ if (i > 0) {
+ downscaled_buffers_[i - 1] = I420Buffer::Create(
+ configurations_[i].width, configurations_[i].height,
+ configurations_[i].width, configurations_[i].width / 2,
+ configurations_[i].width / 2);
+ }
+
+    // `codec_settings` uses kbits/second; the encoder uses bits/second.
+ configurations_[i].max_bps = codec_.maxBitrate * 1000;
+ configurations_[i].target_bps = codec_.startBitrate * 1000;
+
+ // Create encoder parameters based on the layer configuration.
+ SEncParamExt encoder_params = CreateEncoderParams(i);
+
+ // Initialize.
+ if (openh264_encoder->InitializeExt(&encoder_params) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
+ Release();
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ // TODO(pbos): Base init params on these values before submitting.
+ int video_format = EVideoFormatType::videoFormatI420;
+ openh264_encoder->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
+
+    // Initialize encoded image. Default buffer size: size of unencoded data.
+ const size_t new_capacity =
+ CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
+ codec_.simulcastStream[idx].height);
+ encoded_images_[i].SetEncodedData(EncodedImageBuffer::Create(new_capacity));
+ encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
+ encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
+ encoded_images_[i].set_size(0);
+
+ tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
+ scalability_modes_[i] = ScalabilityModeFromTemporalLayers(
+ configurations_[i].num_temporal_layers);
+ if (scalability_modes_[i].has_value()) {
+ svc_controllers_[i] = CreateScalabilityStructure(*scalability_modes_[i]);
+ if (svc_controllers_[i] == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to create scalability structure";
+ Release();
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+ }
+
+ SimulcastRateAllocator init_allocator(codec_);
+ VideoBitrateAllocation allocation =
+ init_allocator.Allocate(VideoBitrateAllocationParameters(
+ DataRate::KilobitsPerSec(codec_.startBitrate), codec_.maxFramerate));
+ SetRates(RateControlParameters(allocation, codec_.maxFramerate));
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264EncoderImpl::Release() {
+ while (!encoders_.empty()) {
+ ISVCEncoder* openh264_encoder = encoders_.back();
+ if (openh264_encoder) {
+ RTC_CHECK_EQ(0, openh264_encoder->Uninitialize());
+ WelsDestroySVCEncoder(openh264_encoder);
+ }
+ encoders_.pop_back();
+ }
+ downscaled_buffers_.clear();
+ configurations_.clear();
+ encoded_images_.clear();
+ pictures_.clear();
+ tl0sync_limit_.clear();
+ svc_controllers_.clear();
+ scalability_modes_.clear();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264EncoderImpl::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_image_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void H264EncoderImpl::SetRates(const RateControlParameters& parameters) {
+ if (encoders_.empty()) {
+ RTC_LOG(LS_WARNING) << "SetRates() while uninitialized.";
+ return;
+ }
+
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Invalid frame rate: " << parameters.framerate_fps;
+ return;
+ }
+
+ if (parameters.bitrate.get_sum_bps() == 0) {
+ // Encoder paused, turn off all encoding.
+ for (size_t i = 0; i < configurations_.size(); ++i) {
+ configurations_[i].SetStreamState(false);
+ }
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps);
+
+ size_t stream_idx = encoders_.size() - 1;
+ for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
+ // Update layer config.
+ configurations_[i].target_bps =
+ parameters.bitrate.GetSpatialLayerSum(stream_idx);
+ configurations_[i].max_frame_rate = parameters.framerate_fps;
+
+ if (configurations_[i].target_bps) {
+ configurations_[i].SetStreamState(true);
+
+ // Update h264 encoder.
+ SBitrateInfo target_bitrate;
+ memset(&target_bitrate, 0, sizeof(SBitrateInfo));
+      target_bitrate.iLayer = SPATIAL_LAYER_ALL;
+ target_bitrate.iBitrate = configurations_[i].target_bps;
+ encoders_[i]->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
+ encoders_[i]->SetOption(ENCODER_OPTION_FRAME_RATE,
+ &configurations_[i].max_frame_rate);
+ } else {
+ configurations_[i].SetStreamState(false);
+ }
+ }
+}
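+
+// Usage sketch (editor's note; assumes an initialized encoder named `encoder`
+// and a 30 fps stream): an all-zero allocation pauses every configured
+// stream, and a later non-zero allocation re-enables it, with SetStreamState()
+// then requesting a key frame for streams that were off. Excluded from the
+// build.
+#if 0
+VideoBitrateAllocation pause;  // get_sum_bps() == 0.
+encoder.SetRates(
+    VideoEncoder::RateControlParameters(pause, /*framerate_fps=*/30.0));
+#endif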
+
+int32_t H264EncoderImpl::Encode(
+ const VideoFrame& input_frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (encoders_.empty()) {
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (!encoded_image_callback_) {
+ RTC_LOG(LS_WARNING)
+ << "InitEncode() has been called, but a callback function "
+ "has not been set with RegisterEncodeCompleteCallback()";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> frame_buffer =
+ input_frame.video_frame_buffer()->ToI420();
+ if (!frame_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(
+ input_frame.video_frame_buffer()->type())
+ << " image to I420. Can't encode frame.";
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+ RTC_CHECK(frame_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ frame_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ bool is_keyframe_needed = false;
+ for (size_t i = 0; i < configurations_.size(); ++i) {
+ if (configurations_[i].key_frame_request && configurations_[i].sending) {
+ // This is legacy behavior, generating a keyframe on all layers
+ // when generating one for a layer that became active for the first time
+ // or after being disabled.
+ is_keyframe_needed = true;
+ break;
+ }
+ }
+
+ RTC_DCHECK_EQ(configurations_[0].width, frame_buffer->width());
+ RTC_DCHECK_EQ(configurations_[0].height, frame_buffer->height());
+
+ // Encode image for each layer.
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ // EncodeFrame input.
+ pictures_[i] = {0};
+ pictures_[i].iPicWidth = configurations_[i].width;
+ pictures_[i].iPicHeight = configurations_[i].height;
+ pictures_[i].iColorFormat = EVideoFormatType::videoFormatI420;
+ pictures_[i].uiTimeStamp = input_frame.ntp_time_ms();
+    // Downscale images for the second and subsequent layers.
+ if (i == 0) {
+ pictures_[i].iStride[0] = frame_buffer->StrideY();
+ pictures_[i].iStride[1] = frame_buffer->StrideU();
+ pictures_[i].iStride[2] = frame_buffer->StrideV();
+ pictures_[i].pData[0] = const_cast<uint8_t*>(frame_buffer->DataY());
+ pictures_[i].pData[1] = const_cast<uint8_t*>(frame_buffer->DataU());
+ pictures_[i].pData[2] = const_cast<uint8_t*>(frame_buffer->DataV());
+ } else {
+ pictures_[i].iStride[0] = downscaled_buffers_[i - 1]->StrideY();
+ pictures_[i].iStride[1] = downscaled_buffers_[i - 1]->StrideU();
+ pictures_[i].iStride[2] = downscaled_buffers_[i - 1]->StrideV();
+ pictures_[i].pData[0] =
+ const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataY());
+ pictures_[i].pData[1] =
+ const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataU());
+ pictures_[i].pData[2] =
+ const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataV());
+      // Scale the previous layer's image down by the downsampling factor.
+ libyuv::I420Scale(pictures_[i - 1].pData[0], pictures_[i - 1].iStride[0],
+ pictures_[i - 1].pData[1], pictures_[i - 1].iStride[1],
+ pictures_[i - 1].pData[2], pictures_[i - 1].iStride[2],
+ configurations_[i - 1].width,
+ configurations_[i - 1].height, pictures_[i].pData[0],
+ pictures_[i].iStride[0], pictures_[i].pData[1],
+ pictures_[i].iStride[1], pictures_[i].pData[2],
+ pictures_[i].iStride[2], configurations_[i].width,
+ configurations_[i].height, libyuv::kFilterBox);
+ }
+
+ if (!configurations_[i].sending) {
+ continue;
+ }
+ if (frame_types != nullptr && i < frame_types->size()) {
+ // Skip frame?
+ if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
+ continue;
+ }
+ }
+ // Send a key frame either when this layer is configured to require one
+ // or we have explicitly been asked to.
+ const size_t simulcast_idx =
+ static_cast<size_t>(configurations_[i].simulcast_idx);
+ bool send_key_frame =
+ is_keyframe_needed ||
+ (frame_types && simulcast_idx < frame_types->size() &&
+ (*frame_types)[simulcast_idx] == VideoFrameType::kVideoFrameKey);
+ if (send_key_frame) {
+ // API doc says ForceIntraFrame(false) does nothing, but calling this
+ // function forces a key frame regardless of the `bIDR` argument's value.
+ // (If every frame is a key frame we get lag/delays.)
+ encoders_[i]->ForceIntraFrame(true);
+ configurations_[i].key_frame_request = false;
+ }
+ // EncodeFrame output.
+ SFrameBSInfo info;
+ memset(&info, 0, sizeof(SFrameBSInfo));
+
+ std::vector<ScalableVideoController::LayerFrameConfig> layer_frames;
+ if (svc_controllers_[i]) {
+ layer_frames = svc_controllers_[i]->NextFrameConfig(send_key_frame);
+ RTC_CHECK_EQ(layer_frames.size(), 1);
+ }
+
+ // Encode!
+ int enc_ret = encoders_[i]->EncodeFrame(&pictures_[i], &info);
+ if (enc_ret != 0) {
+ RTC_LOG(LS_ERROR)
+ << "OpenH264 frame encoding failed, EncodeFrame returned " << enc_ret
+ << ".";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ encoded_images_[i]._encodedWidth = configurations_[i].width;
+ encoded_images_[i]._encodedHeight = configurations_[i].height;
+ encoded_images_[i].SetTimestamp(input_frame.timestamp());
+ encoded_images_[i].SetColorSpace(input_frame.color_space());
+ encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
+ encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);
+
+    // Split the encoded image up into fragments. This also updates
+    // `encoded_images_[i]`.
+ RtpFragmentize(&encoded_images_[i], &info);
+
+    // The encoder can skip frames to save bandwidth, in which case
+    // `encoded_images_[i].size()` == 0.
+ if (encoded_images_[i].size() > 0) {
+ // Parse QP.
+ h264_bitstream_parser_.ParseBitstream(encoded_images_[i]);
+ encoded_images_[i].qp_ =
+ h264_bitstream_parser_.GetLastSliceQp().value_or(-1);
+
+ // Deliver encoded image.
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = kVideoCodecH264;
+ codec_specific.codecSpecific.H264.packetization_mode =
+ packetization_mode_;
+ codec_specific.codecSpecific.H264.temporal_idx = kNoTemporalIdx;
+ codec_specific.codecSpecific.H264.idr_frame =
+ info.eFrameType == videoFrameTypeIDR;
+ codec_specific.codecSpecific.H264.base_layer_sync = false;
+ if (configurations_[i].num_temporal_layers > 1) {
+ const uint8_t tid = info.sLayerInfo[0].uiTemporalId;
+ codec_specific.codecSpecific.H264.temporal_idx = tid;
+ codec_specific.codecSpecific.H264.base_layer_sync =
+ tid > 0 && tid < tl0sync_limit_[i];
+ if (svc_controllers_[i]) {
+ if (layer_frames[0].TemporalId() != tid) {
+ RTC_LOG(LS_WARNING)
+ << "Encoder produced a frame for layer S" << (i + 1) << "T"
+ << tid + 1 << " that wasn't requested.";
+ continue;
+ }
+ encoded_images_[i].SetTemporalIndex(tid);
+ }
+ if (codec_specific.codecSpecific.H264.base_layer_sync) {
+ tl0sync_limit_[i] = tid;
+ }
+ if (tid == 0) {
+ tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
+ }
+ }
+ if (svc_controllers_[i]) {
+ codec_specific.generic_frame_info =
+ svc_controllers_[i]->OnEncodeDone(layer_frames[0]);
+ if (send_key_frame && codec_specific.generic_frame_info.has_value()) {
+ codec_specific.template_structure =
+ svc_controllers_[i]->DependencyStructure();
+ }
+ codec_specific.scalability_mode = scalability_modes_[i];
+ }
+ encoded_image_callback_->OnEncodedImage(encoded_images_[i],
+ &codec_specific);
+ }
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Initialization parameters.
+// There are two ways to initialize: with SEncParamBase (cleared with
+// memset(&p, 0, sizeof(SEncParamBase))), used by Initialize(), and with
+// SEncParamExt, a superset of SEncParamBase (cleared with GetDefaultParams()),
+// used by InitializeExt().
+SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const {
+ SEncParamExt encoder_params;
+ encoders_[i]->GetDefaultParams(&encoder_params);
+ if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
+ encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
+ } else if (codec_.mode == VideoCodecMode::kScreensharing) {
+ encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ encoder_params.iPicWidth = configurations_[i].width;
+ encoder_params.iPicHeight = configurations_[i].height;
+ encoder_params.iTargetBitrate = configurations_[i].target_bps;
+ // Keep unspecified. WebRTC's max codec bitrate is not the same setting
+ // as OpenH264's iMaxBitrate. More details in https://crbug.com/webrtc/11543
+ encoder_params.iMaxBitrate = UNSPECIFIED_BIT_RATE;
+ // Rate Control mode
+ encoder_params.iRCMode = RC_BITRATE_MODE;
+ encoder_params.fMaxFrameRate = configurations_[i].max_frame_rate;
+
+ // The following parameters are extension parameters (they're in SEncParamExt,
+ // not in SEncParamBase).
+ encoder_params.bEnableFrameSkip = configurations_[i].frame_dropping_on;
+ // `uiIntraPeriod` - multiple of GOP size
+ // `keyFrameInterval` - number of frames
+ encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval;
+ // Reuse SPS id if possible. This helps to avoid reset of chromium HW decoder
+ // on each key-frame.
+ // Note that WebRTC resets encoder on resolution change which makes all
+ // EParameterSetStrategy modes except INCREASING_ID (default) essentially
+ // equivalent to CONSTANT_ID.
+ encoder_params.eSpsPpsIdStrategy = SPS_LISTING;
+ encoder_params.uiMaxNalSize = 0;
+  // Threading model: use auto.
+  //  0: auto (the encoder decides dynamically)
+  //  1: single thread (default value)
+  // >1: number of threads
+ encoder_params.iMultipleThreadIdc = NumberOfThreads(
+ encoder_params.iPicWidth, encoder_params.iPicHeight, number_of_cores_);
+ // The base spatial layer 0 is the only one we use.
+ encoder_params.sSpatialLayers[0].iVideoWidth = encoder_params.iPicWidth;
+ encoder_params.sSpatialLayers[0].iVideoHeight = encoder_params.iPicHeight;
+ encoder_params.sSpatialLayers[0].fFrameRate = encoder_params.fMaxFrameRate;
+ encoder_params.sSpatialLayers[0].iSpatialBitrate =
+ encoder_params.iTargetBitrate;
+ encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
+ encoder_params.iMaxBitrate;
+ encoder_params.iTemporalLayerNum = configurations_[i].num_temporal_layers;
+ if (encoder_params.iTemporalLayerNum > 1) {
+ // iNumRefFrame specifies total number of reference buffers to allocate.
+ // For N temporal layers we need at least (N - 1) buffers to store last
+ // encoded frames of all reference temporal layers.
+    // Note that there is no API in the OpenH264 encoder to specify the exact
+    // set of references to be used for prediction of a given frame. The
+    // encoder can theoretically use any available reference buffer.
+ encoder_params.iNumRefFrame = encoder_params.iTemporalLayerNum - 1;
+ }
+ RTC_LOG(LS_INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
+ << OPENH264_MINOR;
+ switch (packetization_mode_) {
+ case H264PacketizationMode::SingleNalUnit:
+ // Limit the size of the packets produced.
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
+ SM_SIZELIMITED_SLICE;
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint =
+ static_cast<unsigned int>(max_payload_size_);
+ RTC_LOG(LS_INFO) << "Encoder is configured with NALU constraint: "
+ << max_payload_size_ << " bytes";
+ break;
+ case H264PacketizationMode::NonInterleaved:
+      // When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means the
+      // encoder picks the slice count automatically from the CPU core count.
+      // TODO(sprang): Set to 0 when we understand why the rate controller
+      // misbehaves when uiSliceNum > 1.
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
+ encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
+ SM_FIXEDSLCNUM_SLICE;
+ break;
+ }
+ return encoder_params;
+}
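+
+// For contrast, a minimal sketch (editor's note; `openh264_encoder` and all
+// values are placeholders) of the simpler SEncParamBase path mentioned above,
+// which this encoder does not use. Excluded from the build.
+#if 0
+SEncParamBase base_params;
+memset(&base_params, 0, sizeof(SEncParamBase));
+base_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
+base_params.iPicWidth = 640;
+base_params.iPicHeight = 480;
+base_params.iTargetBitrate = 500000;  // bits/second.
+base_params.fMaxFrameRate = 30.0f;
+openh264_encoder->Initialize(&base_params);
+#endif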
+
+void H264EncoderImpl::ReportInit() {
+ if (has_reported_init_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
+ kH264EncoderEventInit, kH264EncoderEventMax);
+ has_reported_init_ = true;
+}
+
+void H264EncoderImpl::ReportError() {
+ if (has_reported_error_)
+ return;
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
+ kH264EncoderEventError, kH264EncoderEventMax);
+ has_reported_error_ = true;
+}
+
+VideoEncoder::EncoderInfo H264EncoderImpl::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "OpenH264";
+ info.scaling_settings =
+ VideoEncoder::ScalingSettings(kLowH264QpThreshold, kHighH264QpThreshold);
+ info.is_hardware_accelerated = false;
+ info.supports_simulcast = true;
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420};
+ return info;
+}
+
+void H264EncoderImpl::LayerConfig::SetStreamState(bool send_stream) {
+ if (send_stream && !sending) {
+ // Need a key frame if we have not sent this stream before.
+ key_frame_request = true;
+ }
+ sending = send_stream;
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
new file mode 100644
index 0000000000..f02521f0dc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
+
+// Everything declared in this header is only required when WebRTC is
+// built with H264 support; please do not move anything out of the
+// #ifdef unless needed and tested.
+#ifdef WEBRTC_USE_H264
+
+#if defined(WEBRTC_WIN) && !defined(__clang__)
+#error "See: bugs.webrtc.org/9213#c13."
+#endif
+
+#include <memory>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "third_party/openh264/src/codec/api/wels/codec_app_def.h"
+
+class ISVCEncoder;
+
+namespace webrtc {
+
+class H264EncoderImpl : public H264Encoder {
+ public:
+ struct LayerConfig {
+ int simulcast_idx = 0;
+ int width = -1;
+ int height = -1;
+ bool sending = true;
+ bool key_frame_request = false;
+ float max_frame_rate = 0;
+ uint32_t target_bps = 0;
+ uint32_t max_bps = 0;
+ bool frame_dropping_on = false;
+ int key_frame_interval = 0;
+ int num_temporal_layers = 1;
+
+ void SetStreamState(bool send_stream);
+ };
+
+ public:
+ explicit H264EncoderImpl(const cricket::VideoCodec& codec);
+ ~H264EncoderImpl() override;
+
+ // `settings.max_payload_size` is ignored.
+ // The following members of `codec_settings` are used. The rest are ignored.
+ // - codecType (must be kVideoCodecH264)
+ // - targetBitrate
+ // - maxFramerate
+ // - width
+ // - height
+ int32_t InitEncode(const VideoCodec* codec_settings,
+ const VideoEncoder::Settings& settings) override;
+ int32_t Release() override;
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override;
+ void SetRates(const RateControlParameters& parameters) override;
+
+  // The results of encoding, an EncodedImage and a CodecSpecificInfo, are
+  // passed to the encode complete callback.
+ int32_t Encode(const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ // Exposed for testing.
+ H264PacketizationMode PacketizationModeForTesting() const {
+ return packetization_mode_;
+ }
+
+ private:
+ SEncParamExt CreateEncoderParams(size_t i) const;
+
+ webrtc::H264BitstreamParser h264_bitstream_parser_;
+ // Reports statistics with histograms.
+ void ReportInit();
+ void ReportError();
+
+ std::vector<ISVCEncoder*> encoders_;
+ std::vector<SSourcePicture> pictures_;
+ std::vector<rtc::scoped_refptr<I420Buffer>> downscaled_buffers_;
+ std::vector<LayerConfig> configurations_;
+ std::vector<EncodedImage> encoded_images_;
+ std::vector<std::unique_ptr<ScalableVideoController>> svc_controllers_;
+ absl::InlinedVector<absl::optional<ScalabilityMode>, kMaxSimulcastStreams>
+ scalability_modes_;
+
+ VideoCodec codec_;
+ H264PacketizationMode packetization_mode_;
+ size_t max_payload_size_;
+ int32_t number_of_cores_;
+ EncodedImageCallback* encoded_image_callback_;
+
+ bool has_reported_init_;
+ bool has_reported_error_;
+
+ std::vector<uint8_t> tl0sync_limit_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_USE_H264
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc
new file mode 100644
index 0000000000..52d26955ab
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_encoder_impl_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "modules/video_coding/codecs/h264/h264_encoder_impl.h"
+
+#include "api/video_codecs/video_encoder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kMaxPayloadSize = 1024;
+const int kNumCores = 1;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities,
+ kNumCores,
+ kMaxPayloadSize);
+
+void SetDefaultSettings(VideoCodec* codec_settings) {
+ codec_settings->codecType = kVideoCodecH264;
+ codec_settings->maxFramerate = 60;
+ codec_settings->width = 640;
+ codec_settings->height = 480;
+  // If frame dropping is false, we get a warning that bitrate can't be
+  // controlled for RC_QUALITY_MODE, RC_BITRATE_MODE, and RC_TIMESTAMP_MODE.
+ codec_settings->SetFrameDropEnabled(true);
+ codec_settings->startBitrate = 2000;
+ codec_settings->maxBitrate = 4000;
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithDefaultParameters) {
+ H264EncoderImpl encoder(cricket::VideoCodec("H264"));
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::NonInterleaved,
+ encoder.PacketizationModeForTesting());
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithNonInterleavedModeExplicitly) {
+ cricket::VideoCodec codec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "1");
+ H264EncoderImpl encoder(codec);
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::NonInterleaved,
+ encoder.PacketizationModeForTesting());
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithSingleNalUnitModeExplicitly) {
+ cricket::VideoCodec codec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "0");
+ H264EncoderImpl encoder(codec);
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
+ encoder.PacketizationModeForTesting());
+}
+
+TEST(H264EncoderImplTest, CanInitializeWithRemovedParameter) {
+ cricket::VideoCodec codec("H264");
+ codec.RemoveParam(cricket::kH264FmtpPacketizationMode);
+ H264EncoderImpl encoder(codec);
+ VideoCodec codec_settings;
+ SetDefaultSettings(&codec_settings);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings, kSettings));
+ EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
+ encoder.PacketizationModeForTesting());
+}
+
+} // anonymous namespace
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc
new file mode 100644
index 0000000000..2acb629a76
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/create_simulcast_test_fixture.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
+ std::unique_ptr<VideoEncoderFactory> encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>(
+ []() { return H264Encoder::Create(cricket::VideoCodec("H264")); });
+ std::unique_ptr<VideoDecoderFactory> decoder_factory =
+ std::make_unique<FunctionVideoDecoderFactory>(
+ []() { return H264Decoder::Create(); });
+ return CreateSimulcastTestFixture(std::move(encoder_factory),
+ std::move(decoder_factory),
+ SdpVideoFormat("H264"));
+}
+} // namespace
+
+TEST(TestH264Simulcast, TestKeyFrameRequestsOnAllStreams) {
+ GTEST_SKIP() << "Not applicable to H264.";
+}
+
+TEST(TestH264Simulcast, TestKeyFrameRequestsOnSpecificStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestKeyFrameRequestsOnSpecificStreams();
+}
+
+TEST(TestH264Simulcast, TestPaddingAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingAllStreams();
+}
+
+TEST(TestH264Simulcast, TestPaddingTwoStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreams();
+}
+
+TEST(TestH264Simulcast, TestPaddingTwoStreamsOneMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreamsOneMaxedOut();
+}
+
+TEST(TestH264Simulcast, TestPaddingOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStream();
+}
+
+TEST(TestH264Simulcast, TestPaddingOneStreamTwoMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStreamTwoMaxedOut();
+}
+
+TEST(TestH264Simulcast, TestSendAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSendAllStreams();
+}
+
+TEST(TestH264Simulcast, TestDisablingStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestDisablingStreams();
+}
+
+TEST(TestH264Simulcast, TestActiveStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestActiveStreams();
+}
+
+TEST(TestH264Simulcast, TestSwitchingToOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneStream();
+}
+
+TEST(TestH264Simulcast, TestSwitchingToOneOddStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneOddStream();
+}
+
+TEST(TestH264Simulcast, TestStrideEncodeDecode) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestStrideEncodeDecode();
+}
+
+TEST(TestH264Simulcast, TestSpatioTemporalLayers333PatternEncoder) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSpatioTemporalLayers333PatternEncoder();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h
new file mode 100644
index 0000000000..2635b53842
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "media/base/codec.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+struct SdpVideoFormat;
+
+// Creates an H264 SdpVideoFormat entry with specified parameters.
+RTC_EXPORT SdpVideoFormat
+CreateH264Format(H264Profile profile,
+ H264Level level,
+ const std::string& packetization_mode,
+ bool add_scalability_modes = false);
+
+// Disables the H.264 encoder/decoder implementations that are provided when
+// the `rtc_use_h264` build flag is true (if the flag is false, this function
+// does nothing). This function should only be called before or during WebRTC
+// initialization and is not thread-safe.
+RTC_EXPORT void DisableRtcUseH264();
+
+// Returns a vector with all supported internal H264 encode profiles that we can
+// negotiate in SDP, in order of preference.
+std::vector<SdpVideoFormat> SupportedH264Codecs(
+ bool add_scalability_modes = false);
+
+// Returns a vector with all supported internal H264 decode profiles that we
+// can negotiate in SDP, in order of preference. These will be available for
+// receive-only connections.
+std::vector<SdpVideoFormat> SupportedH264DecoderCodecs();
+
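+// Usage sketch (editor's note; the profile and level values are illustrative):
+//   SdpVideoFormat format = CreateH264Format(
+//       H264Profile::kProfileConstrainedBaseline, H264Level::kLevel3_1,
+//       /*packetization_mode=*/"1");
+// The returned format can then be offered or matched during SDP negotiation.
+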
+class RTC_EXPORT H264Encoder : public VideoEncoder {
+ public:
+ static std::unique_ptr<H264Encoder> Create(const cricket::VideoCodec& codec);
+ // If H.264 is supported (any implementation).
+ static bool IsSupported();
+ static bool SupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+ ~H264Encoder() override {}
+};
+
+class RTC_EXPORT H264Decoder : public VideoDecoder {
+ public:
+ static std::unique_ptr<H264Decoder> Create();
+ static bool IsSupported();
+
+ ~H264Decoder() override {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h
new file mode 100644
index 0000000000..b61dc8c507
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/include/h264_globals.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec-dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
+
+#include <string>
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// The packetization types that we support: single, aggregated, and fragmented.
+enum H264PacketizationTypes {
+ kH264SingleNalu, // This packet contains a single NAL unit.
+ kH264StapA, // This packet contains STAP-A (single time
+ // aggregation) packets. If this packet has an
+ // associated NAL unit type, it'll be for the
+ // first such aggregated packet.
+ kH264FuA, // This packet contains a FU-A (fragmentation
+ // unit) packet, meaning it is a part of a frame
+ // that was too large to fit into a single packet.
+};
+
+// Packetization modes are defined in RFC 6184 section 6.
+// Because the structure containing this enum is zero-initialized in some
+// places, and mode 1 is the default, mode 1 must have the value zero.
+// https://crbug.com/webrtc/6803
+enum class H264PacketizationMode {
+ NonInterleaved = 0, // Mode 1 - STAP-A, FU-A is allowed
+ SingleNalUnit // Mode 0 - only single NALU allowed
+};
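+
+// Example of the zero-value property noted above (editor's sketch):
+//   H264PacketizationMode mode{};  // Value-initialized.
+//   // mode == H264PacketizationMode::NonInterleaved, i.e. mode 1.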
+
+// This function is declared inline because it is not clear which
+// .cc file it should belong to.
+// TODO(hta): Refactor. https://bugs.webrtc.org/6842
+// TODO(jonasolsson): Use absl::string_view instead when that's available.
+inline std::string ToString(H264PacketizationMode mode) {
+ if (mode == H264PacketizationMode::NonInterleaved) {
+ return "NonInterleaved";
+ } else if (mode == H264PacketizationMode::SingleNalUnit) {
+ return "SingleNalUnit";
+ }
+ RTC_DCHECK_NOTREACHED();
+ return "";
+}
+
+struct NaluInfo {
+ uint8_t type;
+ int sps_id;
+ int pps_id;
+};
+
+const size_t kMaxNalusPerPacket = 10;
+
+struct RTPVideoHeaderH264 {
+ // The NAL unit type. If this is a header for a
+ // fragmented packet, it's the NAL unit type of
+ // the original data. If this is the header for an
+ // aggregated packet, it's the NAL unit type of
+ // the first NAL unit in the packet.
+ uint8_t nalu_type;
+ // The packetization type of this buffer - single, aggregated or fragmented.
+ H264PacketizationTypes packetization_type;
+ NaluInfo nalus[kMaxNalusPerPacket];
+ size_t nalus_length;
+ // The packetization mode of this transport. Packetization mode
+ // determines which packetization types are allowed when packetizing.
+ H264PacketizationMode packetization_mode;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
new file mode 100644
index 0000000000..595e627bcc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/h264/test/h264_impl_unittest.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/video/color_space.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+
+class TestH264Impl : public VideoCodecUnitTest {
+ protected:
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return H264Encoder::Create(cricket::VideoCodec(cricket::kH264CodecName));
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return H264Decoder::Create();
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecH264, codec_settings);
+ }
+};
+
+#ifdef WEBRTC_USE_H264
+#define MAYBE_EncodeDecode EncodeDecode
+#define MAYBE_DecodedQpEqualsEncodedQp DecodedQpEqualsEncodedQp
+#else
+#define MAYBE_EncodeDecode DISABLED_EncodeDecode
+#define MAYBE_DecodedQpEqualsEncodedQp DISABLED_DecodedQpEqualsEncodedQp
+#endif
+
+TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
+ VideoFrame input_frame = NextInputFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+
+ const ColorSpace color_space = *decoded_frame->color_space();
+ EXPECT_EQ(ColorSpace::PrimaryID::kUnspecified, color_space.primaries());
+ EXPECT_EQ(ColorSpace::TransferID::kUnspecified, color_space.transfer());
+ EXPECT_EQ(ColorSpace::MatrixID::kUnspecified, color_space.matrix());
+ EXPECT_EQ(ColorSpace::RangeID::kInvalid, color_space.range());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_horizontal());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_vertical());
+}
+
+TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ ASSERT_TRUE(decoded_qp);
+ EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h b/third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h
new file mode 100644
index 0000000000..a8fc6290b9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/common_constants.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains constants that are used by multiple global
+// codec definitions (modules/video_coding/codecs/*/include/*_globals.h)
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+const int16_t kNoPictureId = -1;
+const int16_t kNoTl0PicIdx = -1;
+const uint8_t kNoTemporalIdx = 0xFF;
+const int kNoKeyIdx = -1;
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc
new file mode 100644
index 0000000000..4f33bef2ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+class LibvpxFacade : public LibvpxInterface {
+ public:
+ LibvpxFacade() = default;
+ ~LibvpxFacade() override = default;
+
+ vpx_image_t* img_alloc(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align) const override {
+ return ::vpx_img_alloc(img, fmt, d_w, d_h, align);
+ }
+
+ vpx_image_t* img_wrap(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int stride_align,
+ unsigned char* img_data) const override {
+ return ::vpx_img_wrap(img, fmt, d_w, d_h, stride_align, img_data);
+ }
+
+ void img_free(vpx_image_t* img) const override { ::vpx_img_free(img); }
+
+ vpx_codec_err_t codec_enc_config_set(
+ vpx_codec_ctx_t* ctx,
+ const vpx_codec_enc_cfg_t* cfg) const override {
+ return ::vpx_codec_enc_config_set(ctx, cfg);
+ }
+
+ vpx_codec_err_t codec_enc_config_default(vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ unsigned int usage) const override {
+ return ::vpx_codec_enc_config_default(iface, cfg, usage);
+ }
+
+ vpx_codec_err_t codec_enc_init(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ const vpx_codec_enc_cfg_t* cfg,
+ vpx_codec_flags_t flags) const override {
+ return ::vpx_codec_enc_init(ctx, iface, cfg, flags);
+ }
+
+ vpx_codec_err_t codec_enc_init_multi(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ int num_enc,
+ vpx_codec_flags_t flags,
+ vpx_rational_t* dsf) const override {
+ return ::vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf);
+ }
+
+ vpx_codec_err_t codec_destroy(vpx_codec_ctx_t* ctx) const override {
+ return ::vpx_codec_destroy(ctx);
+ }
+
+ // For types related to these parameters, see section
+ // "VP8 encoder control function parameter type" in vpx/vp8cx.h.
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ uint32_t param) const override {
+ // We need an explicit call for each type since vpx_codec_control is a
+ // macro that gets expanded into another call based on the parameter name.
+ switch (ctrl_id) {
+ case VP8E_SET_ENABLEAUTOALTREF:
+ return vpx_codec_control(ctx, VP8E_SET_ENABLEAUTOALTREF, param);
+ case VP8E_SET_NOISE_SENSITIVITY:
+ return vpx_codec_control(ctx, VP8E_SET_NOISE_SENSITIVITY, param);
+ case VP8E_SET_SHARPNESS:
+ return vpx_codec_control(ctx, VP8E_SET_SHARPNESS, param);
+ case VP8E_SET_STATIC_THRESHOLD:
+ return vpx_codec_control(ctx, VP8E_SET_STATIC_THRESHOLD, param);
+ case VP8E_SET_ARNR_MAXFRAMES:
+ return vpx_codec_control(ctx, VP8E_SET_ARNR_MAXFRAMES, param);
+ case VP8E_SET_ARNR_STRENGTH:
+ return vpx_codec_control(ctx, VP8E_SET_ARNR_STRENGTH, param);
+ case VP8E_SET_CQ_LEVEL:
+ return vpx_codec_control(ctx, VP8E_SET_CQ_LEVEL, param);
+ case VP8E_SET_MAX_INTRA_BITRATE_PCT:
+ return vpx_codec_control(ctx, VP8E_SET_MAX_INTRA_BITRATE_PCT, param);
+ case VP9E_SET_MAX_INTER_BITRATE_PCT:
+ return vpx_codec_control(ctx, VP9E_SET_MAX_INTER_BITRATE_PCT, param);
+ case VP8E_SET_GF_CBR_BOOST_PCT:
+ return vpx_codec_control(ctx, VP8E_SET_GF_CBR_BOOST_PCT, param);
+ case VP8E_SET_SCREEN_CONTENT_MODE:
+ return vpx_codec_control(ctx, VP8E_SET_SCREEN_CONTENT_MODE, param);
+ case VP9E_SET_GF_CBR_BOOST_PCT:
+ return vpx_codec_control(ctx, VP9E_SET_GF_CBR_BOOST_PCT, param);
+ case VP9E_SET_LOSSLESS:
+ return vpx_codec_control(ctx, VP9E_SET_LOSSLESS, param);
+ case VP9E_SET_FRAME_PARALLEL_DECODING:
+ return vpx_codec_control(ctx, VP9E_SET_FRAME_PARALLEL_DECODING, param);
+ case VP9E_SET_AQ_MODE:
+ return vpx_codec_control(ctx, VP9E_SET_AQ_MODE, param);
+ case VP9E_SET_FRAME_PERIODIC_BOOST:
+ return vpx_codec_control(ctx, VP9E_SET_FRAME_PERIODIC_BOOST, param);
+ case VP9E_SET_NOISE_SENSITIVITY:
+ return vpx_codec_control(ctx, VP9E_SET_NOISE_SENSITIVITY, param);
+ case VP9E_SET_MIN_GF_INTERVAL:
+ return vpx_codec_control(ctx, VP9E_SET_MIN_GF_INTERVAL, param);
+ case VP9E_SET_MAX_GF_INTERVAL:
+ return vpx_codec_control(ctx, VP9E_SET_MAX_GF_INTERVAL, param);
+ case VP9E_SET_TARGET_LEVEL:
+ return vpx_codec_control(ctx, VP9E_SET_TARGET_LEVEL, param);
+ case VP9E_SET_ROW_MT:
+ return vpx_codec_control(ctx, VP9E_SET_ROW_MT, param);
+ case VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST:
+ return vpx_codec_control(ctx, VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST,
+ param);
+ case VP9E_SET_SVC_INTER_LAYER_PRED:
+ return vpx_codec_control(ctx, VP9E_SET_SVC_INTER_LAYER_PRED, param);
+ case VP9E_SET_SVC_GF_TEMPORAL_REF:
+ return vpx_codec_control(ctx, VP9E_SET_SVC_GF_TEMPORAL_REF, param);
+ case VP9E_SET_POSTENCODE_DROP:
+ return vpx_codec_control(ctx, VP9E_SET_POSTENCODE_DROP, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
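+
+  // Editor's note on the macro mentioned above: vpx_codec_control(ctx, id,
+  // param) expands (roughly) to vpx_codec_control_##id(ctx, id, param), a
+  // per-control wrapper declared via VPX_CTRL_USE_TYPE that type-checks
+  // `param`; hence the explicit switch when `ctrl_id` is only known at
+  // runtime.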
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_FRAME_FLAGS:
+ return vpx_codec_control(ctx, VP8E_SET_FRAME_FLAGS, param);
+ case VP8E_SET_TEMPORAL_LAYER_ID:
+ return vpx_codec_control(ctx, VP8E_SET_TEMPORAL_LAYER_ID, param);
+ case VP9E_SET_SVC:
+ return vpx_codec_control(ctx, VP9E_SET_SVC, param);
+ case VP8E_SET_CPUUSED:
+ return vpx_codec_control(ctx, VP8E_SET_CPUUSED, param);
+ case VP8E_SET_TOKEN_PARTITIONS:
+ return vpx_codec_control(ctx, VP8E_SET_TOKEN_PARTITIONS, param);
+ case VP8E_SET_TUNING:
+ return vpx_codec_control(ctx, VP8E_SET_TUNING, param);
+ case VP9E_SET_TILE_COLUMNS:
+ return vpx_codec_control(ctx, VP9E_SET_TILE_COLUMNS, param);
+ case VP9E_SET_TILE_ROWS:
+ return vpx_codec_control(ctx, VP9E_SET_TILE_ROWS, param);
+ case VP9E_SET_TPL:
+ return vpx_codec_control(ctx, VP9E_SET_TPL, param);
+ case VP9E_SET_ALT_REF_AQ:
+ return vpx_codec_control(ctx, VP9E_SET_ALT_REF_AQ, param);
+ case VP9E_SET_TUNE_CONTENT:
+ return vpx_codec_control(ctx, VP9E_SET_TUNE_CONTENT, param);
+ case VP9E_SET_COLOR_SPACE:
+ return vpx_codec_control(ctx, VP9E_SET_COLOR_SPACE, param);
+ case VP9E_SET_COLOR_RANGE:
+ return vpx_codec_control(ctx, VP9E_SET_COLOR_RANGE, param);
+ case VP9E_SET_DELTA_Q_UV:
+ return vpx_codec_control(ctx, VP9E_SET_DELTA_Q_UV, param);
+ case VP9E_SET_DISABLE_OVERSHOOT_MAXQ_CBR:
+ return vpx_codec_control(ctx, VP9E_SET_DISABLE_OVERSHOOT_MAXQ_CBR,
+ param);
+ case VP9E_SET_DISABLE_LOOPFILTER:
+ return vpx_codec_control(ctx, VP9E_SET_DISABLE_LOOPFILTER, param);
+
+ default:
+ if (param >= 0) {
+          // Might be intended for uint32_t but an int literal was used; try
+          // the uint32_t overload as a fallback.
+ return codec_control(ctx, ctrl_id, static_cast<uint32_t>(param));
+ }
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int* param) const override {
+ switch (ctrl_id) {
+ case VP8E_GET_LAST_QUANTIZER:
+ return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER, param);
+ case VP8E_GET_LAST_QUANTIZER_64:
+ return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER_64, param);
+ case VP9E_SET_RENDER_SIZE:
+ return vpx_codec_control(ctx, VP9E_SET_RENDER_SIZE, param);
+ case VP9E_GET_LEVEL:
+ return vpx_codec_control(ctx, VP9E_GET_LEVEL, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_roi_map* param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_ROI_MAP:
+ return vpx_codec_control(ctx, VP8E_SET_ROI_MAP, param);
+ case VP9E_SET_ROI_MAP:
+ return vpx_codec_control(ctx, VP9E_SET_ROI_MAP, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_active_map* param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_ACTIVEMAP:
+ return vpx_codec_control(ctx, VP8E_SET_ACTIVEMAP, param);
+ case VP9E_GET_ACTIVEMAP:
+        return vpx_codec_control(ctx, VP9E_GET_ACTIVEMAP, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_scaling_mode* param) const override {
+ switch (ctrl_id) {
+ case VP8E_SET_SCALEMODE:
+ return vpx_codec_control(ctx, VP8E_SET_SCALEMODE, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_extra_cfg_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_PARAMETERS:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_PARAMETERS, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_frame_drop_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_FRAME_DROP_LAYER:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_FRAME_DROP_LAYER, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ void* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_PARAMETERS:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_PARAMETERS, param);
+ case VP9E_REGISTER_CX_CALLBACK:
+ return vpx_codec_control_(ctx, VP9E_REGISTER_CX_CALLBACK, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_layer_id_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_LAYER_ID:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_LAYER_ID, param);
+ case VP9E_GET_SVC_LAYER_ID:
+ return vpx_codec_control_(ctx, VP9E_GET_SVC_LAYER_ID, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_ref_frame_config_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_REF_FRAME_CONFIG:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_REF_FRAME_CONFIG, param);
+ case VP9E_GET_SVC_REF_FRAME_CONFIG:
+ return vpx_codec_control_(ctx, VP9E_GET_SVC_REF_FRAME_CONFIG, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_spatial_layer_sync_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_SPATIAL_LAYER_SYNC:
+ return vpx_codec_control_(ctx, VP9E_SET_SVC_SPATIAL_LAYER_SYNC, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_rc_funcs_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_EXTERNAL_RATE_CONTROL:
+ return vpx_codec_control_(ctx, VP9E_SET_EXTERNAL_RATE_CONTROL, param);
+ default:
+ RTC_DCHECK_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id;
+ }
+ return VPX_CODEC_ERROR;
+ }
+
+ vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx,
+ const vpx_image_t* img,
+ vpx_codec_pts_t pts,
+ uint64_t duration,
+ vpx_enc_frame_flags_t flags,
+ uint64_t deadline) const override {
+ return ::vpx_codec_encode(ctx, img, pts, duration, flags, deadline);
+ }
+
+ const vpx_codec_cx_pkt_t* codec_get_cx_data(
+ vpx_codec_ctx_t* ctx,
+ vpx_codec_iter_t* iter) const override {
+ return ::vpx_codec_get_cx_data(ctx, iter);
+ }
+
+ const char* codec_error_detail(vpx_codec_ctx_t* ctx) const override {
+ return ::vpx_codec_error_detail(ctx);
+ }
+
+ const char* codec_error(vpx_codec_ctx_t* ctx) const override {
+ return ::vpx_codec_error(ctx);
+ }
+
+ const char* codec_err_to_string(vpx_codec_err_t err) const override {
+ return ::vpx_codec_err_to_string(err);
+ }
+};
+
+} // namespace
+
+std::unique_ptr<LibvpxInterface> LibvpxInterface::Create() {
+ return std::make_unique<LibvpxFacade>();
+}
+
+} // namespace webrtc
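+
+// Usage sketch (editor's note): callers hold the interface by pointer so that
+// tests can substitute a gMock implementation (see mock_libvpx_interface.h).
+// Excluded from the build.
+#if 0
+std::unique_ptr<webrtc::LibvpxInterface> libvpx =
+    webrtc::LibvpxInterface::Create();
+vpx_codec_enc_cfg_t cfg;
+libvpx->codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, /*usage=*/0);
+#endif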
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h
new file mode 100644
index 0000000000..3dea24dd6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_image.h"
+
+namespace webrtc {
+
+// This interface is a proxy to the static libvpx functions, so that they
+// can be mocked for testing. Currently supports VP8 encoder functions.
+// TODO(sprang): Extend this to VP8 decoder and VP9 encoder/decoder too.
+class LibvpxInterface {
+ public:
+ LibvpxInterface() = default;
+ virtual ~LibvpxInterface() = default;
+
+ virtual vpx_image_t* img_alloc(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align) const = 0;
+ virtual vpx_image_t* img_wrap(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int stride_align,
+ unsigned char* img_data) const = 0;
+ virtual void img_free(vpx_image_t* img) const = 0;
+
+ virtual vpx_codec_err_t codec_enc_config_set(
+ vpx_codec_ctx_t* ctx,
+ const vpx_codec_enc_cfg_t* cfg) const = 0;
+ virtual vpx_codec_err_t codec_enc_config_default(
+ vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ unsigned int usage) const = 0;
+
+ virtual vpx_codec_err_t codec_enc_init(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ const vpx_codec_enc_cfg_t* cfg,
+ vpx_codec_flags_t flags) const = 0;
+ virtual vpx_codec_err_t codec_enc_init_multi(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ int num_enc,
+ vpx_codec_flags_t flags,
+ vpx_rational_t* dsf) const = 0;
+ virtual vpx_codec_err_t codec_destroy(vpx_codec_ctx_t* ctx) const = 0;
+
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ uint32_t param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ int* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_roi_map* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_active_map* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_scaling_mode* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_extra_cfg_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_frame_drop_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ void* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_layer_id_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_ref_frame_config_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_spatial_layer_sync_t* param) const = 0;
+ virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_rc_funcs_t* param) const = 0;
+ virtual vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx,
+ const vpx_image_t* img,
+ vpx_codec_pts_t pts,
+ uint64_t duration,
+ vpx_enc_frame_flags_t flags,
+ uint64_t deadline) const = 0;
+
+ virtual const vpx_codec_cx_pkt_t* codec_get_cx_data(
+ vpx_codec_ctx_t* ctx,
+ vpx_codec_iter_t* iter) const = 0;
+
+ virtual const char* codec_error_detail(vpx_codec_ctx_t* ctx) const = 0;
+ virtual const char* codec_error(vpx_codec_ctx_t* ctx) const = 0;
+ virtual const char* codec_err_to_string(vpx_codec_err_t err) const = 0;
+
+  // Returns an interface wrapping the actual libvpx functions.
+ static std::unique_ptr<LibvpxInterface> Create();
+};
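+
+// Illustrative usage sketch (hypothetical, not a prescribed calling sequence):
+// an encoder built on this interface might drive it roughly as follows:
+//
+//   std::unique_ptr<LibvpxInterface> libvpx = LibvpxInterface::Create();
+//   vpx_codec_enc_cfg_t cfg;
+//   libvpx->codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, /*usage=*/0);
+//   vpx_codec_ctx_t ctx;
+//   libvpx->codec_enc_init(&ctx, vpx_codec_vp8_cx(), &cfg, /*flags=*/0);
+//   libvpx->codec_control(&ctx, VP8E_SET_CPUUSED, -6);
+//   // ... per frame: codec_encode(), then drain codec_get_cx_data() ...
+//   libvpx->codec_destroy(&ctx);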
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h b/third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h
new file mode 100644
index 0000000000..6dfe733dd0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/interface/mock_libvpx_interface.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_
+
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class MockLibvpxInterface : public LibvpxInterface {
+ public:
+ MOCK_METHOD(
+ vpx_image_t*,
+ img_alloc,
+ (vpx_image_t*, vpx_img_fmt_t, unsigned int, unsigned int, unsigned int),
+ (const, override));
+ MOCK_METHOD(vpx_image_t*,
+ img_wrap,
+ (vpx_image_t*,
+ vpx_img_fmt_t,
+ unsigned int,
+ unsigned int,
+ unsigned int,
+ unsigned char*),
+ (const, override));
+ MOCK_METHOD(void, img_free, (vpx_image_t * img), (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_config_set,
+ (vpx_codec_ctx_t*, const vpx_codec_enc_cfg_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_config_default,
+ (vpx_codec_iface_t*, vpx_codec_enc_cfg_t*, unsigned int),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_init,
+ (vpx_codec_ctx_t*,
+ vpx_codec_iface_t*,
+ const vpx_codec_enc_cfg_t*,
+ vpx_codec_flags_t),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_enc_init_multi,
+ (vpx_codec_ctx_t*,
+ vpx_codec_iface_t*,
+ vpx_codec_enc_cfg_t*,
+ int,
+ vpx_codec_flags_t,
+ vpx_rational_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_destroy,
+ (vpx_codec_ctx_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, uint32_t),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, int),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, int*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_roi_map*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_active_map*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_scaling_mode*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_extra_cfg_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_frame_drop_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, void*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_layer_id_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*,
+ vp8e_enc_control_id,
+ vpx_svc_ref_frame_config_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*,
+ vp8e_enc_control_id,
+ vpx_svc_spatial_layer_sync_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_control,
+ (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_rc_funcs_t*),
+ (const, override));
+ MOCK_METHOD(vpx_codec_err_t,
+ codec_encode,
+ (vpx_codec_ctx_t*,
+ const vpx_image_t*,
+ vpx_codec_pts_t,
+ uint64_t,
+ vpx_enc_frame_flags_t,
+ uint64_t),
+ (const, override));
+ MOCK_METHOD(const vpx_codec_cx_pkt_t*,
+ codec_get_cx_data,
+ (vpx_codec_ctx_t*, vpx_codec_iter_t*),
+ (const, override));
+ MOCK_METHOD(const char*,
+ codec_error_detail,
+ (vpx_codec_ctx_t*),
+ (const, override));
+ MOCK_METHOD(const char*, codec_error, (vpx_codec_ctx_t*), (const, override));
+ MOCK_METHOD(const char*,
+ codec_err_to_string,
+ (vpx_codec_err_t),
+ (const, override));
+};
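+
+// Illustrative test sketch (hypothetical): inject the mock wherever a
+// LibvpxInterface is accepted and set expectations with gmock, e.g.
+//
+//   auto libvpx =
+//       std::make_unique<::testing::NiceMock<MockLibvpxInterface>>();
+//   EXPECT_CALL(*libvpx, codec_encode)
+//       .WillRepeatedly(::testing::Return(VPX_CODEC_OK));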
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc
new file mode 100644
index 0000000000..8740884f5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "api/video/video_frame_buffer.h"
+
+namespace webrtc {
+
+AugmentedVideoFrameBuffer::AugmentedVideoFrameBuffer(
+ const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size)
+ : augmenting_data_size_(augmenting_data_size),
+ augmenting_data_(std::move(augmenting_data)),
+ video_frame_buffer_(video_frame_buffer) {}
+
+rtc::scoped_refptr<VideoFrameBuffer>
+AugmentedVideoFrameBuffer::GetVideoFrameBuffer() const {
+ return video_frame_buffer_;
+}
+
+uint8_t* AugmentedVideoFrameBuffer::GetAugmentingData() const {
+ return augmenting_data_.get();
+}
+
+uint16_t AugmentedVideoFrameBuffer::GetAugmentingDataSize() const {
+ return augmenting_data_size_;
+}
+
+VideoFrameBuffer::Type AugmentedVideoFrameBuffer::type() const {
+ return video_frame_buffer_->type();
+}
+
+int AugmentedVideoFrameBuffer::width() const {
+ return video_frame_buffer_->width();
+}
+
+int AugmentedVideoFrameBuffer::height() const {
+ return video_frame_buffer_->height();
+}
+
+rtc::scoped_refptr<I420BufferInterface> AugmentedVideoFrameBuffer::ToI420() {
+ return video_frame_buffer_->ToI420();
+}
+
+const I420BufferInterface* AugmentedVideoFrameBuffer::GetI420() const {
+ // TODO(https://crbug.com/webrtc/12021): When AugmentedVideoFrameBuffer is
+ // updated to implement the buffer interfaces of relevant
+ // VideoFrameBuffer::Types, stop overriding GetI420() as a workaround to
+ // AugmentedVideoFrameBuffer not being the type that is returned by type().
+ return video_frame_buffer_->GetI420();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h
new file mode 100644
index 0000000000..d711cd07da
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_frame_buffer.h"
+
+namespace webrtc {
+class AugmentedVideoFrameBuffer : public VideoFrameBuffer {
+ public:
+ AugmentedVideoFrameBuffer(
+ const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size);
+
+ // Retrieves the underlying VideoFrameBuffer without the augmented data
+ rtc::scoped_refptr<VideoFrameBuffer> GetVideoFrameBuffer() const;
+
+  // Returns a raw pointer to the augmenting data; ownership remains with
+  // this buffer.
+  uint8_t* GetAugmentingData() const;
+
+  // Returns the size of the augmenting data in bytes.
+  uint16_t GetAugmentingDataSize() const;
+
+ // Returns the type of the underlying VideoFrameBuffer
+ Type type() const final;
+
+ // Returns the width of the underlying VideoFrameBuffer
+ int width() const final;
+
+ // Returns the height of the underlying VideoFrameBuffer
+ int height() const final;
+
+  // Returns the I420 buffer of the underlying frame buffer.
+ rtc::scoped_refptr<I420BufferInterface> ToI420() final;
+ // Returns GetI420() of the underlying VideoFrameBuffer.
+ // TODO(hbos): AugmentedVideoFrameBuffer should not return a type (such as
+ // kI420) without also implementing that type's interface (i.e.
+ // I420BufferInterface). Either implement all possible Type's interfaces or
+ // return kNative.
+ const I420BufferInterface* GetI420() const final;
+
+ private:
+ uint16_t augmenting_data_size_;
+ std::unique_ptr<uint8_t[]> augmenting_data_;
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
+};
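+
+// Illustrative construction sketch (hypothetical sizes and payload):
+//
+//   auto data = std::make_unique<uint8_t[]>(4);  // 4-byte augmenting payload.
+//   auto buffer = rtc::make_ref_counted<AugmentedVideoFrameBuffer>(
+//       I420Buffer::Create(/*width=*/640, /*height=*/480), std::move(data),
+//       /*augmenting_data_size=*/4);
+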
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
new file mode 100644
index 0000000000..e73f7d0e9f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+
+namespace webrtc {
+
+class MultiplexDecoderAdapter : public VideoDecoder {
+ public:
+ // `factory` is not owned and expected to outlive this class.
+ MultiplexDecoderAdapter(VideoDecoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmenting_data = false);
+ virtual ~MultiplexDecoderAdapter();
+
+ // Implements VideoDecoder
+ bool Configure(const Settings& settings) override;
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override;
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+ int32_t Release() override;
+
+ void Decoded(AlphaCodecStream stream_idx,
+ VideoFrame* decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp);
+
+ private:
+ // Wrapper class that redirects Decoded() calls.
+ class AdapterDecodedImageCallback;
+
+ // Holds the decoded image output of a frame.
+ struct DecodedImageData;
+
+  // Holds the augmenting data of an image.
+ struct AugmentingData;
+
+ void MergeAlphaImages(VideoFrame* decoded_image,
+ const absl::optional<int32_t>& decode_time_ms,
+ const absl::optional<uint8_t>& qp,
+ VideoFrame* multiplex_decoded_image,
+ const absl::optional<int32_t>& multiplex_decode_time_ms,
+ const absl::optional<uint8_t>& multiplex_qp,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_length);
+
+ VideoDecoderFactory* const factory_;
+ const SdpVideoFormat associated_format_;
+ std::vector<std::unique_ptr<VideoDecoder>> decoders_;
+ std::vector<std::unique_ptr<AdapterDecodedImageCallback>> adapter_callbacks_;
+ DecodedImageCallback* decoded_complete_callback_;
+
+ // Holds YUV or AXX decode output of a frame that is identified by timestamp.
+ std::map<uint32_t /* timestamp */, DecodedImageData> decoded_data_;
+ std::map<uint32_t /* timestamp */, AugmentingData> decoded_augmenting_data_;
+ const bool supports_augmenting_data_;
+};
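+
+// Illustrative wiring sketch (hypothetical `factory`, which must outlive the
+// adapter):
+//
+//   MultiplexDecoderAdapter adapter(factory, SdpVideoFormat("VP9"),
+//                                   /*supports_augmenting_data=*/true);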
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h
new file mode 100644
index 0000000000..2e5aad8a5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+enum AlphaCodecStream {
+ kYUVStream = 0,
+ kAXXStream = 1,
+ kAlphaCodecStreams = 2,
+};
+
+class MultiplexEncoderAdapter : public VideoEncoder {
+ public:
+ // `factory` is not owned and expected to outlive this class.
+ MultiplexEncoderAdapter(VideoEncoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmenting_data = false);
+ virtual ~MultiplexEncoderAdapter();
+
+ // Implements VideoEncoder
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+ int InitEncode(const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) override;
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+ void SetRates(const RateControlParameters& parameters) override;
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+ void OnRttUpdate(int64_t rtt_ms) override;
+ void OnLossNotification(const LossNotification& loss_notification) override;
+ int Release() override;
+ EncoderInfo GetEncoderInfo() const override;
+
+ EncodedImageCallback::Result OnEncodedImage(
+ AlphaCodecStream stream_idx,
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo);
+
+ private:
+ // Wrapper class that redirects OnEncodedImage() calls.
+ class AdapterEncodedImageCallback;
+
+ VideoEncoderFactory* const factory_;
+ const SdpVideoFormat associated_format_;
+ std::vector<std::unique_ptr<VideoEncoder>> encoders_;
+ std::vector<std::unique_ptr<AdapterEncodedImageCallback>> adapter_callbacks_;
+ EncodedImageCallback* encoded_complete_callback_;
+
+ std::map<uint32_t /* timestamp */, MultiplexImage> stashed_images_
+ RTC_GUARDED_BY(mutex_);
+
+ uint16_t picture_index_ = 0;
+ std::vector<uint8_t> multiplex_dummy_planes_;
+
+ int key_frame_interval_;
+ EncodedImage combined_image_;
+
+ Mutex mutex_;
+
+ const bool supports_augmented_data_;
+ int augmenting_data_size_ = 0;
+
+ EncoderInfo encoder_info_;
+};
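+
+// Illustrative wiring sketch (hypothetical `factory`, which must outlive the
+// adapter):
+//
+//   MultiplexEncoderAdapter adapter(factory, SdpVideoFormat("VP9"),
+//                                   /*supports_augmenting_data=*/true);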
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
new file mode 100644
index 0000000000..0ad3d3883a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+class MultiplexDecoderAdapter::AdapterDecodedImageCallback
+ : public webrtc::DecodedImageCallback {
+ public:
+ AdapterDecodedImageCallback(webrtc::MultiplexDecoderAdapter* adapter,
+ AlphaCodecStream stream_idx)
+ : adapter_(adapter), stream_idx_(stream_idx) {}
+
+ void Decoded(VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ if (!adapter_)
+ return;
+ adapter_->Decoded(stream_idx_, &decoded_image, decode_time_ms, qp);
+ }
+ int32_t Decoded(VideoFrame& decoded_image) override {
+ RTC_DCHECK_NOTREACHED();
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ private:
+ MultiplexDecoderAdapter* adapter_;
+ const AlphaCodecStream stream_idx_;
+};
+
+struct MultiplexDecoderAdapter::DecodedImageData {
+ explicit DecodedImageData(AlphaCodecStream stream_idx)
+ : stream_idx_(stream_idx),
+ decoded_image_(
+ VideoFrame::Builder()
+ .set_video_frame_buffer(
+ I420Buffer::Create(1 /* width */, 1 /* height */))
+ .set_timestamp_rtp(0)
+ .set_timestamp_us(0)
+ .set_rotation(kVideoRotation_0)
+ .build()) {
+ RTC_DCHECK_EQ(kAXXStream, stream_idx);
+ }
+ DecodedImageData(AlphaCodecStream stream_idx,
+ const VideoFrame& decoded_image,
+ const absl::optional<int32_t>& decode_time_ms,
+ const absl::optional<uint8_t>& qp)
+ : stream_idx_(stream_idx),
+ decoded_image_(decoded_image),
+ decode_time_ms_(decode_time_ms),
+ qp_(qp) {}
+
+ DecodedImageData() = delete;
+ DecodedImageData(const DecodedImageData&) = delete;
+ DecodedImageData& operator=(const DecodedImageData&) = delete;
+
+ const AlphaCodecStream stream_idx_;
+ VideoFrame decoded_image_;
+ const absl::optional<int32_t> decode_time_ms_;
+ const absl::optional<uint8_t> qp_;
+};
+
+struct MultiplexDecoderAdapter::AugmentingData {
+ AugmentingData(std::unique_ptr<uint8_t[]> augmenting_data, uint16_t data_size)
+ : data_(std::move(augmenting_data)), size_(data_size) {}
+ AugmentingData() = delete;
+ AugmentingData(const AugmentingData&) = delete;
+ AugmentingData& operator=(const AugmentingData&) = delete;
+
+ std::unique_ptr<uint8_t[]> data_;
+ const uint16_t size_;
+};
+
+MultiplexDecoderAdapter::MultiplexDecoderAdapter(
+ VideoDecoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmenting_data)
+ : factory_(factory),
+ associated_format_(associated_format),
+ supports_augmenting_data_(supports_augmenting_data) {}
+
+MultiplexDecoderAdapter::~MultiplexDecoderAdapter() {
+ Release();
+}
+
+bool MultiplexDecoderAdapter::Configure(const Settings& settings) {
+ RTC_DCHECK_EQ(settings.codec_type(), kVideoCodecMultiplex);
+ Settings associated_settings = settings;
+ associated_settings.set_codec_type(
+ PayloadStringToCodecType(associated_format_.name));
+ for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ factory_->CreateVideoDecoder(associated_format_);
+ if (!decoder->Configure(associated_settings)) {
+ return false;
+ }
+ adapter_callbacks_.emplace_back(
+ new MultiplexDecoderAdapter::AdapterDecodedImageCallback(
+ this, static_cast<AlphaCodecStream>(i)));
+ decoder->RegisterDecodeCompleteCallback(adapter_callbacks_.back().get());
+ decoders_.emplace_back(std::move(decoder));
+ }
+ return true;
+}
+
+int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) {
+ MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);
+
+ if (supports_augmenting_data_) {
+ RTC_DCHECK(decoded_augmenting_data_.find(input_image.Timestamp()) ==
+ decoded_augmenting_data_.end());
+ decoded_augmenting_data_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(input_image.Timestamp()),
+ std::forward_as_tuple(std::move(image.augmenting_data),
+ image.augmenting_data_size));
+ }
+
+ if (image.component_count == 1) {
+ RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==
+ decoded_data_.end());
+ decoded_data_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(input_image.Timestamp()),
+ std::forward_as_tuple(kAXXStream));
+ }
+ int32_t rv = 0;
+ for (size_t i = 0; i < image.image_components.size(); i++) {
+ rv = decoders_[image.image_components[i].component_index]->Decode(
+ image.image_components[i].encoded_image, missing_frames,
+ render_time_ms);
+ if (rv != WEBRTC_VIDEO_CODEC_OK)
+ return rv;
+ }
+ return rv;
+}
+
+int32_t MultiplexDecoderAdapter::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t MultiplexDecoderAdapter::Release() {
+ for (auto& decoder : decoders_) {
+ const int32_t rv = decoder->Release();
+ if (rv)
+ return rv;
+ }
+ decoders_.clear();
+ adapter_callbacks_.clear();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void MultiplexDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
+ VideoFrame* decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ const auto& other_decoded_data_it =
+ decoded_data_.find(decoded_image->timestamp());
+ const auto& augmenting_data_it =
+ decoded_augmenting_data_.find(decoded_image->timestamp());
+ const bool has_augmenting_data =
+ augmenting_data_it != decoded_augmenting_data_.end();
+ if (other_decoded_data_it != decoded_data_.end()) {
+ uint16_t augmenting_data_size =
+ has_augmenting_data ? augmenting_data_it->second.size_ : 0;
+ std::unique_ptr<uint8_t[]> augmenting_data =
+ has_augmenting_data ? std::move(augmenting_data_it->second.data_)
+ : nullptr;
+ auto& other_image_data = other_decoded_data_it->second;
+ if (stream_idx == kYUVStream) {
+ RTC_DCHECK_EQ(kAXXStream, other_image_data.stream_idx_);
+ MergeAlphaImages(decoded_image, decode_time_ms, qp,
+ &other_image_data.decoded_image_,
+ other_image_data.decode_time_ms_, other_image_data.qp_,
+ std::move(augmenting_data), augmenting_data_size);
+ } else {
+ RTC_DCHECK_EQ(kYUVStream, other_image_data.stream_idx_);
+ RTC_DCHECK_EQ(kAXXStream, stream_idx);
+ MergeAlphaImages(&other_image_data.decoded_image_,
+ other_image_data.decode_time_ms_, other_image_data.qp_,
+ decoded_image, decode_time_ms, qp,
+ std::move(augmenting_data), augmenting_data_size);
+ }
+ decoded_data_.erase(decoded_data_.begin(), other_decoded_data_it);
+ if (has_augmenting_data) {
+ decoded_augmenting_data_.erase(decoded_augmenting_data_.begin(),
+ augmenting_data_it);
+ }
+ return;
+ }
+ RTC_DCHECK(decoded_data_.find(decoded_image->timestamp()) ==
+ decoded_data_.end());
+ decoded_data_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(decoded_image->timestamp()),
+ std::forward_as_tuple(stream_idx, *decoded_image, decode_time_ms, qp));
+}
+
+void MultiplexDecoderAdapter::MergeAlphaImages(
+ VideoFrame* decoded_image,
+ const absl::optional<int32_t>& decode_time_ms,
+ const absl::optional<uint8_t>& qp,
+ VideoFrame* alpha_decoded_image,
+ const absl::optional<int32_t>& alpha_decode_time_ms,
+ const absl::optional<uint8_t>& alpha_qp,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_length) {
+ rtc::scoped_refptr<VideoFrameBuffer> merged_buffer;
+ if (!alpha_decoded_image->timestamp()) {
+ merged_buffer = decoded_image->video_frame_buffer();
+ } else {
+ rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
+ decoded_image->video_frame_buffer()->ToI420();
+ rtc::scoped_refptr<webrtc::I420BufferInterface> alpha_buffer =
+ alpha_decoded_image->video_frame_buffer()->ToI420();
+ RTC_DCHECK_EQ(yuv_buffer->width(), alpha_buffer->width());
+ RTC_DCHECK_EQ(yuv_buffer->height(), alpha_buffer->height());
+ merged_buffer = WrapI420ABuffer(
+ yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
+ yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
+ yuv_buffer->DataV(), yuv_buffer->StrideV(), alpha_buffer->DataY(),
+ alpha_buffer->StrideY(),
+ // To keep references alive.
+ [yuv_buffer, alpha_buffer] {});
+ }
+ if (supports_augmenting_data_) {
+ merged_buffer = rtc::make_ref_counted<AugmentedVideoFrameBuffer>(
+ merged_buffer, std::move(augmenting_data), augmenting_data_length);
+ }
+
+ VideoFrame merged_image = VideoFrame::Builder()
+ .set_video_frame_buffer(merged_buffer)
+ .set_timestamp_rtp(decoded_image->timestamp())
+ .set_timestamp_us(0)
+ .set_rotation(decoded_image->rotation())
+ .set_id(decoded_image->id())
+ .set_packet_infos(decoded_image->packet_infos())
+ .build();
+ decoded_complete_callback_->Decoded(merged_image, decode_time_ms, qp);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
new file mode 100644
index 0000000000..0f05d1a89c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+
+#include <cstring>
+#include <utility>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+int PackHeader(uint8_t* buffer, MultiplexImageHeader header) {
+ int offset = 0;
+ ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, header.component_count);
+ offset += sizeof(uint8_t);
+
+ ByteWriter<uint16_t>::WriteBigEndian(buffer + offset, header.image_index);
+ offset += sizeof(uint16_t);
+
+ ByteWriter<uint16_t>::WriteBigEndian(buffer + offset,
+ header.augmenting_data_size);
+ offset += sizeof(uint16_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ header.augmenting_data_offset);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ header.first_component_header_offset);
+ offset += sizeof(uint32_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
+ return offset;
+}
+
+MultiplexImageHeader UnpackHeader(const uint8_t* buffer) {
+ MultiplexImageHeader header;
+ int offset = 0;
+ header.component_count = ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint8_t);
+
+ header.image_index = ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint16_t);
+
+ header.augmenting_data_size =
+ ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint16_t);
+
+ header.augmenting_data_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ header.first_component_header_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
+ return header;
+}
+
+int PackFrameHeader(uint8_t* buffer,
+ MultiplexImageComponentHeader frame_header) {
+ int offset = 0;
+ ByteWriter<uint32_t>::WriteBigEndian(
+ buffer + offset, frame_header.next_component_header_offset);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint8_t>::WriteBigEndian(buffer + offset,
+ frame_header.component_index);
+ offset += sizeof(uint8_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ frame_header.bitstream_offset);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
+ frame_header.bitstream_length);
+ offset += sizeof(uint32_t);
+
+ ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
+ offset += sizeof(uint8_t);
+
+ ByteWriter<uint8_t>::WriteBigEndian(
+ buffer + offset, static_cast<uint8_t>(frame_header.frame_type));
+ offset += sizeof(uint8_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
+ return offset;
+}
+
+MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
+ MultiplexImageComponentHeader frame_header;
+ int offset = 0;
+
+ frame_header.next_component_header_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ frame_header.component_index =
+ ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint8_t);
+
+ frame_header.bitstream_offset =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ frame_header.bitstream_length =
+ ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
+ offset += sizeof(uint32_t);
+
+ // This makes the wire format depend on the numeric values of the
+ // VideoCodecType and VideoFrameType enum constants.
+ frame_header.codec_type = static_cast<VideoCodecType>(
+ ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
+ offset += sizeof(uint8_t);
+
+ frame_header.frame_type = static_cast<VideoFrameType>(
+ ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
+ offset += sizeof(uint8_t);
+
+ RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
+ return frame_header;
+}
+
+void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
+ memcpy(buffer, image.encoded_image.data(), image.encoded_image.size());
+}
+
+MultiplexImage::MultiplexImage(uint16_t picture_index,
+ uint8_t frame_count,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size)
+ : image_index(picture_index),
+ component_count(frame_count),
+ augmenting_data_size(augmenting_data_size),
+ augmenting_data(std::move(augmenting_data)) {}
+
+EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
+ const MultiplexImage& multiplex_image) {
+ MultiplexImageHeader header;
+ std::vector<MultiplexImageComponentHeader> frame_headers;
+
+ header.component_count = multiplex_image.component_count;
+ header.image_index = multiplex_image.image_index;
+ int header_offset = kMultiplexImageHeaderSize;
+ header.first_component_header_offset = header_offset;
+ header.augmenting_data_offset =
+ header_offset +
+ kMultiplexImageComponentHeaderSize * header.component_count;
+ header.augmenting_data_size = multiplex_image.augmenting_data_size;
+ int bitstream_offset =
+ header.augmenting_data_offset + header.augmenting_data_size;
+
+ const std::vector<MultiplexImageComponent>& images =
+ multiplex_image.image_components;
+ EncodedImage combined_image = images[0].encoded_image;
+ for (size_t i = 0; i < images.size(); i++) {
+ MultiplexImageComponentHeader frame_header;
+ header_offset += kMultiplexImageComponentHeaderSize;
+ frame_header.next_component_header_offset =
+ (i == images.size() - 1) ? 0 : header_offset;
+ frame_header.component_index = images[i].component_index;
+
+ frame_header.bitstream_offset = bitstream_offset;
+ frame_header.bitstream_length =
+ static_cast<uint32_t>(images[i].encoded_image.size());
+ bitstream_offset += frame_header.bitstream_length;
+
+ frame_header.codec_type = images[i].codec_type;
+ frame_header.frame_type = images[i].encoded_image._frameType;
+
+    // If any component is a delta frame, the combined frame must be marked
+    // as a delta frame: the whole image can only be decoded without previous
+    // frame data when every component is a key frame. Hence the combined
+    // frame is marked as a key frame only if all components are key frames.
+ if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) {
+ combined_image._frameType = VideoFrameType::kVideoFrameDelta;
+ }
+
+ frame_headers.push_back(frame_header);
+ }
+
+ auto buffer = EncodedImageBuffer::Create(bitstream_offset);
+ combined_image.SetEncodedData(buffer);
+
+  // Header
+ header_offset = PackHeader(buffer->data(), header);
+ RTC_DCHECK_EQ(header.first_component_header_offset,
+ kMultiplexImageHeaderSize);
+
+ // Frame Header
+ for (size_t i = 0; i < images.size(); i++) {
+ int relative_offset =
+ PackFrameHeader(buffer->data() + header_offset, frame_headers[i]);
+ RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);
+
+ header_offset = frame_headers[i].next_component_header_offset;
+ RTC_DCHECK_EQ(header_offset,
+ (i == images.size() - 1)
+ ? 0
+ : (kMultiplexImageHeaderSize +
+ kMultiplexImageComponentHeaderSize * (i + 1)));
+ }
+
+ // Augmenting Data
+ if (multiplex_image.augmenting_data_size != 0) {
+ memcpy(buffer->data() + header.augmenting_data_offset,
+ multiplex_image.augmenting_data.get(),
+ multiplex_image.augmenting_data_size);
+ }
+
+ // Bitstreams
+ for (size_t i = 0; i < images.size(); i++) {
+ PackBitstream(buffer->data() + frame_headers[i].bitstream_offset,
+ images[i]);
+ }
+
+ return combined_image;
+}
+
+MultiplexImage MultiplexEncodedImagePacker::Unpack(
+ const EncodedImage& combined_image) {
+ const MultiplexImageHeader& header = UnpackHeader(combined_image.data());
+
+ std::vector<MultiplexImageComponentHeader> frame_headers;
+ int header_offset = header.first_component_header_offset;
+
+ while (header_offset > 0) {
+ frame_headers.push_back(
+ UnpackFrameHeader(combined_image.data() + header_offset));
+ header_offset = frame_headers.back().next_component_header_offset;
+ }
+
+ RTC_DCHECK_LE(frame_headers.size(), header.component_count);
+ std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
+ if (header.augmenting_data_size != 0) {
+ augmenting_data =
+ std::unique_ptr<uint8_t[]>(new uint8_t[header.augmenting_data_size]);
+ memcpy(augmenting_data.get(),
+ combined_image.data() + header.augmenting_data_offset,
+ header.augmenting_data_size);
+ }
+
+ MultiplexImage multiplex_image(header.image_index, header.component_count,
+ std::move(augmenting_data),
+ header.augmenting_data_size);
+
+ for (size_t i = 0; i < frame_headers.size(); i++) {
+ MultiplexImageComponent image_component;
+ image_component.component_index = frame_headers[i].component_index;
+ image_component.codec_type = frame_headers[i].codec_type;
+
+ EncodedImage encoded_image = combined_image;
+ encoded_image.SetTimestamp(combined_image.Timestamp());
+ encoded_image._frameType = frame_headers[i].frame_type;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ combined_image.data() + frame_headers[i].bitstream_offset,
+ frame_headers[i].bitstream_length));
+
+ image_component.encoded_image = encoded_image;
+
+ multiplex_image.image_components.push_back(image_component);
+ }
+
+ return multiplex_image;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h
new file mode 100644
index 0000000000..299a0159d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
+#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+// Struct describing the whole bundle of multiple frames of an image.
+// This struct is expected to be set at the beginning of a picture's
+// bitstream data.
+struct MultiplexImageHeader {
+ // The number of frame components making up the complete picture data.
+  // For example, `component_count` = 2 for a YUV frame paired with an alpha
+  // frame.
+ uint8_t component_count;
+
+  // The increasing image ID given by the encoder. All components of a single
+  // picture share the same `image_index`.
+ uint16_t image_index;
+
+ // The location of the first MultiplexImageComponentHeader in the bitstream,
+  // in bytes from the beginning of the bitstream.
+ uint32_t first_component_header_offset;
+
+  // The location of the augmenting data in the bitstream, in bytes from the
+  // beginning of the bitstream.
+ uint32_t augmenting_data_offset;
+
+  // The size of the augmenting data in the bitstream, in bytes.
+ uint16_t augmenting_data_size;
+};
+const int kMultiplexImageHeaderSize =
+ sizeof(uint8_t) + 2 * sizeof(uint16_t) + 2 * sizeof(uint32_t);
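+// Resulting wire layout (big-endian), as written by PackHeader() in
+// multiplex_encoded_image_packer.cc:
+//   byte 0      component_count                (uint8_t)
+//   bytes 1-2   image_index                    (uint16_t)
+//   bytes 3-4   augmenting_data_size           (uint16_t)
+//   bytes 5-8   augmenting_data_offset         (uint32_t)
+//   bytes 9-12  first_component_header_offset  (uint32_t)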
+
+// Struct describing the individual image component's content.
+struct MultiplexImageComponentHeader {
+ // The location of the next MultiplexImageComponentHeader in the bitstream,
+  // in bytes from the beginning of the bitstream.
+ uint32_t next_component_header_offset;
+
+  // Identifies which component this frame represents, e.g. YUV frame vs.
+  // alpha frame.
+ uint8_t component_index;
+
+ // The location of the real encoded image data of the frame in the bitstream,
+  // in bytes from the beginning of the bitstream.
+ uint32_t bitstream_offset;
+
+ // Indicates the number of bytes of the encoded image data.
+ uint32_t bitstream_length;
+
+  // Indicates the underlying VideoCodecType of the frame, e.g. VP8 or VP9.
+ VideoCodecType codec_type;
+
+  // Indicates whether the underlying frame is a key frame or a delta frame.
+ VideoFrameType frame_type;
+};
+const int kMultiplexImageComponentHeaderSize =
+ sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
+ sizeof(uint8_t) + sizeof(uint8_t);
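+// Resulting wire layout (big-endian), as written by PackFrameHeader() in
+// multiplex_encoded_image_packer.cc:
+//   bytes 0-3    next_component_header_offset  (uint32_t)
+//   byte 4       component_index               (uint8_t)
+//   bytes 5-8    bitstream_offset              (uint32_t)
+//   bytes 9-12   bitstream_length              (uint32_t)
+//   byte 13      codec_type                    (uint8_t)
+//   byte 14      frame_type                    (uint8_t)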
+
+// Struct holding the encoded image for one component.
+struct MultiplexImageComponent {
+  // Indicates the underlying VideoCodecType of the frame, e.g. VP8 or VP9.
+ VideoCodecType codec_type;
+
+  // Identifies which component this frame represents, e.g. YUV frame vs.
+  // alpha frame.
+ uint8_t component_index;
+
+ // Stores the actual frame data of the encoded image.
+ EncodedImage encoded_image;
+};
+
+// Struct holding the whole bundle of components of an image.
+struct MultiplexImage {
+ uint16_t image_index;
+ uint8_t component_count;
+ uint16_t augmenting_data_size;
+ std::unique_ptr<uint8_t[]> augmenting_data;
+ std::vector<MultiplexImageComponent> image_components;
+
+ MultiplexImage(uint16_t picture_index,
+ uint8_t component_count,
+ std::unique_ptr<uint8_t[]> augmenting_data,
+ uint16_t augmenting_data_size);
+};
+
+// A utility class providing conversion between two representations of a
+// multiplex image frame:
+// 1. The packed version is a single encoded image, with all necessary
+//    metadata packed into the bitstream as headers.
+// 2. The unpacked version is essentially a list of encoded images, one per
+//    component.
+class MultiplexEncodedImagePacker {
+ public:
+  // Note: It is the caller's responsibility to release the result's buffer.
+ static EncodedImage PackAndRelease(const MultiplexImage& image);
+
+ // Note: The image components just share the memory with `combined_image`.
+ static MultiplexImage Unpack(const EncodedImage& combined_image);
+};
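+
+// Illustrative round trip (hypothetical, for exposition): given a populated
+// MultiplexImage `image`,
+//
+//   EncodedImage packed = MultiplexEncodedImagePacker::PackAndRelease(image);
+//   MultiplexImage unpacked = MultiplexEncodedImagePacker::Unpack(packed);
+//
+// `unpacked.image_components` then shares memory with `packed`.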
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
new file mode 100644
index 0000000000..80744e2d8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+
+#include <cstring>
+
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/video_common.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// Callback wrapper that helps distinguish returned results from `encoders_`
+// instances.
+class MultiplexEncoderAdapter::AdapterEncodedImageCallback
+ : public webrtc::EncodedImageCallback {
+ public:
+ AdapterEncodedImageCallback(webrtc::MultiplexEncoderAdapter* adapter,
+ AlphaCodecStream stream_idx)
+ : adapter_(adapter), stream_idx_(stream_idx) {}
+
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ if (!adapter_)
+ return Result(Result::OK);
+ return adapter_->OnEncodedImage(stream_idx_, encoded_image,
+ codec_specific_info);
+ }
+
+ private:
+ MultiplexEncoderAdapter* adapter_;
+ const AlphaCodecStream stream_idx_;
+};
+
+MultiplexEncoderAdapter::MultiplexEncoderAdapter(
+ VideoEncoderFactory* factory,
+ const SdpVideoFormat& associated_format,
+ bool supports_augmented_data)
+ : factory_(factory),
+ associated_format_(associated_format),
+ encoded_complete_callback_(nullptr),
+ key_frame_interval_(0),
+ supports_augmented_data_(supports_augmented_data) {}
+
+MultiplexEncoderAdapter::~MultiplexEncoderAdapter() {
+ Release();
+}
+
+void MultiplexEncoderAdapter::SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) {
+ // Ignored.
+}
+
+int MultiplexEncoderAdapter::InitEncode(
+ const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) {
+ const size_t buffer_size =
+ CalcBufferSize(VideoType::kI420, inst->width, inst->height);
+ multiplex_dummy_planes_.resize(buffer_size);
+ // It is more expensive to encode 0x00, so use 0x80 instead.
+ std::fill(multiplex_dummy_planes_.begin(), multiplex_dummy_planes_.end(),
+ 0x80);
+
+ RTC_DCHECK_EQ(kVideoCodecMultiplex, inst->codecType);
+ VideoCodec video_codec = *inst;
+ video_codec.codecType = PayloadStringToCodecType(associated_format_.name);
+
+ // Take over the key frame interval at adapter level, because we have to
+ // sync the key frames for both sub-encoders.
+ switch (video_codec.codecType) {
+ case kVideoCodecVP8:
+ key_frame_interval_ = video_codec.VP8()->keyFrameInterval;
+ video_codec.VP8()->keyFrameInterval = 0;
+ break;
+ case kVideoCodecVP9:
+ key_frame_interval_ = video_codec.VP9()->keyFrameInterval;
+ video_codec.VP9()->keyFrameInterval = 0;
+ break;
+ case kVideoCodecH264:
+ key_frame_interval_ = video_codec.H264()->keyFrameInterval;
+ video_codec.H264()->keyFrameInterval = 0;
+ break;
+ default:
+ break;
+ }
+
+ encoder_info_ = EncoderInfo();
+ encoder_info_.implementation_name = "MultiplexEncoderAdapter (";
+ encoder_info_.requested_resolution_alignment = 1;
+ encoder_info_.apply_alignment_to_all_simulcast_layers = false;
+ // This needs to be false so that we can do the split in Encode().
+ encoder_info_.supports_native_handle = false;
+
+ for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
+ std::unique_ptr<VideoEncoder> encoder =
+ factory_->CreateVideoEncoder(associated_format_);
+ const int rv = encoder->InitEncode(&video_codec, settings);
+ if (rv) {
+ RTC_LOG(LS_ERROR) << "Failed to create multiplex codec index " << i;
+ return rv;
+ }
+ adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(
+ this, static_cast<AlphaCodecStream>(i)));
+ encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get());
+
+ const EncoderInfo& encoder_impl_info = encoder->GetEncoderInfo();
+ encoder_info_.implementation_name += encoder_impl_info.implementation_name;
+ if (i != kAlphaCodecStreams - 1) {
+ encoder_info_.implementation_name += ", ";
+ }
+    // Report hardware acceleration if any of the encoders uses it.
+    // For example, if down-scaling is problematic due to pipelining delay in
+    // HW encoders, CPU adaptation needs higher encoder usage thresholds.
+ if (i == 0) {
+ encoder_info_.is_hardware_accelerated =
+ encoder_impl_info.is_hardware_accelerated;
+ } else {
+ encoder_info_.is_hardware_accelerated |=
+ encoder_impl_info.is_hardware_accelerated;
+ }
+
+ encoder_info_.requested_resolution_alignment = cricket::LeastCommonMultiple(
+ encoder_info_.requested_resolution_alignment,
+ encoder_impl_info.requested_resolution_alignment);
+
+ if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) {
+ encoder_info_.apply_alignment_to_all_simulcast_layers = true;
+ }
+
+ encoders_.emplace_back(std::move(encoder));
+ }
+ encoder_info_.implementation_name += ")";
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int MultiplexEncoderAdapter::Encode(
+ const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (!encoded_complete_callback_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+  // The input image is forwarded as-is, unless it is a native buffer and
+  // `supports_augmented_data_` is true, in which case we need to map it in
+  // order to access the underlying AugmentedVideoFrameBuffer.
+ VideoFrame forwarded_image = input_image;
+ if (supports_augmented_data_ &&
+ forwarded_image.video_frame_buffer()->type() ==
+ VideoFrameBuffer::Type::kNative) {
+ auto info = GetEncoderInfo();
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer =
+ forwarded_image.video_frame_buffer()->GetMappedFrameBuffer(
+ info.preferred_pixel_formats);
+ if (!mapped_buffer) {
+ // Unable to map the buffer.
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ forwarded_image.set_video_frame_buffer(std::move(mapped_buffer));
+ }
+
+ std::vector<VideoFrameType> adjusted_frame_types;
+ if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
+ adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey);
+ } else {
+ adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
+ }
+ const bool has_alpha = forwarded_image.video_frame_buffer()->type() ==
+ VideoFrameBuffer::Type::kI420A;
+ std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
+ uint16_t augmenting_data_length = 0;
+ AugmentedVideoFrameBuffer* augmented_video_frame_buffer = nullptr;
+ if (supports_augmented_data_) {
+ augmented_video_frame_buffer = static_cast<AugmentedVideoFrameBuffer*>(
+ forwarded_image.video_frame_buffer().get());
+ augmenting_data_length =
+ augmented_video_frame_buffer->GetAugmentingDataSize();
+ augmenting_data =
+ std::unique_ptr<uint8_t[]>(new uint8_t[augmenting_data_length]);
+ memcpy(augmenting_data.get(),
+ augmented_video_frame_buffer->GetAugmentingData(),
+ augmenting_data_length);
+ augmenting_data_size_ = augmenting_data_length;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ stashed_images_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(forwarded_image.timestamp()),
+ std::forward_as_tuple(
+ picture_index_, has_alpha ? kAlphaCodecStreams : 1,
+ std::move(augmenting_data), augmenting_data_length));
+ }
+
+ ++picture_index_;
+
+ // Encode YUV
+ int rv =
+ encoders_[kYUVStream]->Encode(forwarded_image, &adjusted_frame_types);
+
+  // If we do not receive an alpha frame, we send a single frame for this
+  // `picture_index_`. The receiver will see a component count of 1, which
+  // signals this case.
+ if (rv || !has_alpha)
+ return rv;
+
+ // Encode AXX
+ rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
+ supports_augmented_data_
+ ? augmented_video_frame_buffer->GetVideoFrameBuffer()
+ : forwarded_image.video_frame_buffer();
+ const I420ABufferInterface* yuva_buffer = frame_buffer->GetI420A();
+ rtc::scoped_refptr<I420BufferInterface> alpha_buffer =
+ WrapI420Buffer(forwarded_image.width(), forwarded_image.height(),
+ yuva_buffer->DataA(), yuva_buffer->StrideA(),
+ multiplex_dummy_planes_.data(), yuva_buffer->StrideU(),
+ multiplex_dummy_planes_.data(), yuva_buffer->StrideV(),
+ // To keep reference alive.
+ [frame_buffer] {});
+ VideoFrame alpha_image =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(alpha_buffer)
+ .set_timestamp_rtp(forwarded_image.timestamp())
+ .set_timestamp_ms(forwarded_image.render_time_ms())
+ .set_rotation(forwarded_image.rotation())
+ .set_id(forwarded_image.id())
+ .set_packet_infos(forwarded_image.packet_infos())
+ .build();
+ rv = encoders_[kAXXStream]->Encode(alpha_image, &adjusted_frame_types);
+ return rv;
+}
+
+int MultiplexEncoderAdapter::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void MultiplexEncoderAdapter::SetRates(
+ const RateControlParameters& parameters) {
+ VideoBitrateAllocation bitrate_allocation(parameters.bitrate);
+ bitrate_allocation.SetBitrate(
+ 0, 0, parameters.bitrate.GetBitrate(0, 0) - augmenting_data_size_);
+ for (auto& encoder : encoders_) {
+ // TODO(emircan): `framerate` is used to calculate duration in encoder
+ // instances. We report the total frame rate to keep real time for now.
+ // Remove this after refactoring duration logic.
+ encoder->SetRates(RateControlParameters(
+ bitrate_allocation,
+ static_cast<uint32_t>(encoders_.size() * parameters.framerate_fps),
+ parameters.bandwidth_allocation -
+ DataRate::BitsPerSec(augmenting_data_size_)));
+ }
+}
+
+void MultiplexEncoderAdapter::OnPacketLossRateUpdate(float packet_loss_rate) {
+ for (auto& encoder : encoders_) {
+ encoder->OnPacketLossRateUpdate(packet_loss_rate);
+ }
+}
+
+void MultiplexEncoderAdapter::OnRttUpdate(int64_t rtt_ms) {
+ for (auto& encoder : encoders_) {
+ encoder->OnRttUpdate(rtt_ms);
+ }
+}
+
+void MultiplexEncoderAdapter::OnLossNotification(
+ const LossNotification& loss_notification) {
+ for (auto& encoder : encoders_) {
+ encoder->OnLossNotification(loss_notification);
+ }
+}
+
+int MultiplexEncoderAdapter::Release() {
+ for (auto& encoder : encoders_) {
+ const int rv = encoder->Release();
+ if (rv)
+ return rv;
+ }
+ encoders_.clear();
+ adapter_callbacks_.clear();
+ MutexLock lock(&mutex_);
+ stashed_images_.clear();
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoEncoder::EncoderInfo MultiplexEncoderAdapter::GetEncoderInfo() const {
+ return encoder_info_;
+}
+
+EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
+ AlphaCodecStream stream_idx,
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo) {
+ // Save the image
+ MultiplexImageComponent image_component;
+ image_component.component_index = stream_idx;
+ image_component.codec_type =
+ PayloadStringToCodecType(associated_format_.name);
+ image_component.encoded_image = encodedImage;
+
+ MutexLock lock(&mutex_);
+  const auto stashed_image_itr =
+      stashed_images_.find(encodedImage.Timestamp());
+  RTC_DCHECK(stashed_image_itr != stashed_images_.end());
+  // Advance only after the DCHECK: calling std::next on end() would be
+  // undefined behavior.
+  const auto stashed_image_next_itr = std::next(stashed_image_itr, 1);
+  MultiplexImage& stashed_image = stashed_image_itr->second;
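+  // `component_count` is 1 for YUV-only frames and kAlphaCodecStreams for
+  // YUVA frames (see Encode()); the image is complete once that many
+  // components have arrived.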
+ const uint8_t frame_count = stashed_image.component_count;
+
+ stashed_image.image_components.push_back(image_component);
+
+ if (stashed_image.image_components.size() == frame_count) {
+ // Complete case
+ for (auto iter = stashed_images_.begin();
+ iter != stashed_images_.end() && iter != stashed_image_next_itr;
+ iter++) {
+ // No image at all, skip.
+ if (iter->second.image_components.size() == 0)
+ continue;
+
+ // We have to send out those stashed frames, otherwise the delta frame
+ // dependency chain is broken.
+ combined_image_ =
+ MultiplexEncodedImagePacker::PackAndRelease(iter->second);
+
+ CodecSpecificInfo codec_info = *codecSpecificInfo;
+ codec_info.codecType = kVideoCodecMultiplex;
+ encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info);
+ }
+
+ stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);
+ }
+ return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
new file mode 100644
index 0000000000..be0f5deb52
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/test/mock_video_decoder_factory.h"
+#include "api/test/mock_video_encoder_factory.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+using ::testing::_;
+using ::testing::Return;
+
+namespace webrtc {
+
+constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
+const VideoCodecType kMultiplexAssociatedCodecType =
+ PayloadStringToCodecType(kMultiplexAssociatedCodecName);
+
+class TestMultiplexAdapter : public VideoCodecUnitTest,
+ public ::testing::WithParamInterface<
+ bool /* supports_augmenting_data */> {
+ public:
+ TestMultiplexAdapter()
+ : decoder_factory_(new webrtc::MockVideoDecoderFactory),
+ encoder_factory_(new webrtc::MockVideoEncoderFactory),
+ supports_augmenting_data_(GetParam()) {}
+
+ protected:
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return std::make_unique<MultiplexDecoderAdapter>(
+ decoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName),
+ supports_augmenting_data_);
+ }
+
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return std::make_unique<MultiplexEncoderAdapter>(
+ encoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName),
+ supports_augmenting_data_);
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kMultiplexAssociatedCodecType, codec_settings);
+ codec_settings->VP9()->numberOfTemporalLayers = 1;
+ codec_settings->VP9()->numberOfSpatialLayers = 1;
+ codec_settings->codecType = webrtc::kVideoCodecMultiplex;
+ }
+
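+  // Wraps `video_frame` in an AugmentedVideoFrameBuffer carrying 16 bytes of
+  // test data (values 0..15) that CheckData() verifies after decoding.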
+ std::unique_ptr<VideoFrame> CreateDataAugmentedInputFrame(
+ VideoFrame* video_frame) {
+ rtc::scoped_refptr<VideoFrameBuffer> video_buffer =
+ video_frame->video_frame_buffer();
+ std::unique_ptr<uint8_t[]> data =
+ std::unique_ptr<uint8_t[]>(new uint8_t[16]);
+ for (int i = 0; i < 16; i++) {
+ data[i] = i;
+ }
+ auto augmented_video_frame_buffer =
+ rtc::make_ref_counted<AugmentedVideoFrameBuffer>(video_buffer,
+ std::move(data), 16);
+ return std::make_unique<VideoFrame>(
+ VideoFrame::Builder()
+ .set_video_frame_buffer(augmented_video_frame_buffer)
+ .set_timestamp_rtp(video_frame->timestamp())
+ .set_timestamp_ms(video_frame->render_time_ms())
+ .set_rotation(video_frame->rotation())
+ .set_id(video_frame->id())
+ .build());
+ }
+
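+  // Synthesizes an I420A frame by reusing the Y plane as the alpha plane;
+  // the capturing lambda keeps `yuv_buffer` alive while the wrapper is used.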
+ std::unique_ptr<VideoFrame> CreateI420AInputFrame() {
+ VideoFrame input_frame = NextInputFrame();
+ rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
+ input_frame.video_frame_buffer()->ToI420();
+ rtc::scoped_refptr<I420ABufferInterface> yuva_buffer = WrapI420ABuffer(
+ yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
+ yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
+ yuv_buffer->DataV(), yuv_buffer->StrideV(), yuv_buffer->DataY(),
+ yuv_buffer->StrideY(),
+ // To keep reference alive.
+ [yuv_buffer] {});
+ return std::make_unique<VideoFrame>(VideoFrame::Builder()
+ .set_video_frame_buffer(yuva_buffer)
+ .set_timestamp_rtp(123)
+ .set_timestamp_ms(345)
+ .set_rotation(kVideoRotation_0)
+ .build());
+ }
+
+ std::unique_ptr<VideoFrame> CreateInputFrame(bool contains_alpha) {
+ std::unique_ptr<VideoFrame> video_frame;
+ if (contains_alpha) {
+ video_frame = CreateI420AInputFrame();
+ } else {
+ VideoFrame next_frame = NextInputFrame();
+ video_frame = std::make_unique<VideoFrame>(
+ VideoFrame::Builder()
+ .set_video_frame_buffer(next_frame.video_frame_buffer())
+ .set_timestamp_rtp(next_frame.timestamp())
+ .set_timestamp_ms(next_frame.render_time_ms())
+ .set_rotation(next_frame.rotation())
+ .set_id(next_frame.id())
+ .build());
+ }
+ if (supports_augmenting_data_) {
+ video_frame = CreateDataAugmentedInputFrame(video_frame.get());
+ }
+
+ return video_frame;
+ }
+
+ void CheckData(rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer) {
+ if (!supports_augmenting_data_) {
+ return;
+ }
+ AugmentedVideoFrameBuffer* augmented_buffer =
+ static_cast<AugmentedVideoFrameBuffer*>(video_frame_buffer.get());
+ EXPECT_EQ(augmented_buffer->GetAugmentingDataSize(), 16);
+ uint8_t* data = augmented_buffer->GetAugmentingData();
+ for (int i = 0; i < 16; i++) {
+ EXPECT_EQ(data[i], i);
+ }
+ }
+
+ std::unique_ptr<VideoFrame> ExtractAXXFrame(const VideoFrame& video_frame) {
+ rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer =
+ video_frame.video_frame_buffer();
+ if (supports_augmenting_data_) {
+ AugmentedVideoFrameBuffer* augmentedBuffer =
+ static_cast<AugmentedVideoFrameBuffer*>(video_frame_buffer.get());
+ video_frame_buffer = augmentedBuffer->GetVideoFrameBuffer();
+ }
+ const I420ABufferInterface* yuva_buffer = video_frame_buffer->GetI420A();
+ rtc::scoped_refptr<I420BufferInterface> axx_buffer = WrapI420Buffer(
+ yuva_buffer->width(), yuva_buffer->height(), yuva_buffer->DataA(),
+ yuva_buffer->StrideA(), yuva_buffer->DataU(), yuva_buffer->StrideU(),
+ yuva_buffer->DataV(), yuva_buffer->StrideV(), [video_frame_buffer] {});
+ return std::make_unique<VideoFrame>(VideoFrame::Builder()
+ .set_video_frame_buffer(axx_buffer)
+ .set_timestamp_rtp(123)
+ .set_timestamp_ms(345)
+ .set_rotation(kVideoRotation_0)
+ .build());
+ }
+
+ private:
+ void SetUp() override {
+ EXPECT_CALL(*decoder_factory_, Die);
+ // The decoders/encoders will be owned by the caller of
+ // CreateVideoDecoder()/CreateVideoEncoder().
+ EXPECT_CALL(*decoder_factory_, CreateVideoDecoder)
+ .Times(2)
+ .WillRepeatedly([] { return VP9Decoder::Create(); });
+
+ EXPECT_CALL(*encoder_factory_, Die);
+ EXPECT_CALL(*encoder_factory_, CreateVideoEncoder)
+ .Times(2)
+ .WillRepeatedly([] { return VP9Encoder::Create(); });
+
+ VideoCodecUnitTest::SetUp();
+ }
+
+ const std::unique_ptr<webrtc::MockVideoDecoderFactory> decoder_factory_;
+ const std::unique_ptr<webrtc::MockVideoEncoderFactory> encoder_factory_;
+ const bool supports_augmenting_data_;
+};
+
+// TODO(emircan): Currently VideoCodecUnitTest tests do a complete setup
+// step that goes beyond constructing `decoder_`. Simplify these tests to do
+// less.
+TEST_P(TestMultiplexAdapter, ConstructAndDestructDecoder) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
+}
+
+TEST_P(TestMultiplexAdapter, ConstructAndDestructEncoder) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+}
+
+TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
+ std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(input_frame.get(), decoded_frame.get()), 36);
+ CheckData(decoded_frame->video_frame_buffer());
+}
+
+TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
+ std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(yuva_frame.get(), decoded_frame.get()), 36);
+
+ // Find PSNR for AXX bits.
+ std::unique_ptr<VideoFrame> input_axx_frame = ExtractAXXFrame(*yuva_frame);
+ std::unique_ptr<VideoFrame> output_axx_frame =
+ ExtractAXXFrame(*decoded_frame);
+ EXPECT_GT(I420PSNR(input_axx_frame.get(), output_axx_frame.get()), 47);
+
+ CheckData(decoded_frame->video_frame_buffer());
+}
+
+TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
+ std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+ EXPECT_FALSE(encoded_frame.SpatialIndex());
+
+ const MultiplexImage& unpacked_frame =
+ MultiplexEncodedImagePacker::Unpack(encoded_frame);
+ EXPECT_EQ(0, unpacked_frame.image_index);
+ EXPECT_EQ(1, unpacked_frame.component_count);
+ const MultiplexImageComponent& component = unpacked_frame.image_components[0];
+ EXPECT_EQ(0, component.component_index);
+ EXPECT_NE(nullptr, component.encoded_image.data());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, component.encoded_image._frameType);
+}
+
+TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
+ std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
+ EXPECT_FALSE(encoded_frame.SpatialIndex());
+
+ const MultiplexImage& unpacked_frame =
+ MultiplexEncodedImagePacker::Unpack(encoded_frame);
+ EXPECT_EQ(0, unpacked_frame.image_index);
+ EXPECT_EQ(2, unpacked_frame.component_count);
+ EXPECT_EQ(unpacked_frame.image_components.size(),
+ unpacked_frame.component_count);
+ for (int i = 0; i < unpacked_frame.component_count; ++i) {
+ const MultiplexImageComponent& component =
+ unpacked_frame.image_components[i];
+ EXPECT_EQ(i, component.component_index);
+ EXPECT_NE(nullptr, component.encoded_image.data());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+ component.encoded_image._frameType);
+ }
+}
+
+TEST_P(TestMultiplexAdapter, ImageIndexIncreases) {
+ std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
+ const size_t expected_num_encoded_frames = 3;
+ for (size_t i = 0; i < expected_num_encoded_frames; ++i) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ const MultiplexImage& unpacked_frame =
+ MultiplexEncodedImagePacker::Unpack(encoded_frame);
+ EXPECT_EQ(i, unpacked_frame.image_index);
+ EXPECT_EQ(
+ i ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey,
+ encoded_frame._frameType);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(TestMultiplexAdapter,
+ TestMultiplexAdapter,
+ ::testing::Bool());
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc
new file mode 100644
index 0000000000..d1be684cbb
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stddef.h>
+
+#include <memory>
+
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/checks.h"
+#include "sdk/android/native_api/codecs/wrapper.h"
+#include "sdk/android/native_api/jni/class_loader.h"
+#include "sdk/android/native_api/jni/jvm.h"
+#include "sdk/android/native_api/jni/scoped_java_ref.h"
+#include "sdk/android/src/jni/jvm.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
+
+void EnsureInitializedOnce() {
+ RTC_CHECK(::webrtc::jni::GetJVM() != nullptr);
+
+ JNIEnv* jni = ::webrtc::jni::AttachCurrentThreadIfNeeded();
+ JavaVM* jvm = NULL;
+ RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+
+ // Initialize the Java environment (currently only used by the audio manager).
+ webrtc::JVM::Initialize(jvm);
+}
+
+} // namespace
+
+void InitializeAndroidObjects() {
+ RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+}
+
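+// Instantiates org.webrtc.HardwareVideoEncoderFactory through JNI and wraps
+// it in a native VideoEncoderFactory. The "(Lorg/webrtc/EglBase$Context;ZZ)V"
+// signature matches the Java constructor taking a shared EGL context plus the
+// enable_intel_vp8_encoder and enable_h264_high_profile flags.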
+std::unique_ptr<VideoEncoderFactory> CreateAndroidEncoderFactory() {
+ JNIEnv* env = AttachCurrentThreadIfNeeded();
+ ScopedJavaLocalRef<jclass> factory_class =
+ GetClass(env, "org/webrtc/HardwareVideoEncoderFactory");
+ jmethodID factory_constructor = env->GetMethodID(
+ factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;ZZ)V");
+ ScopedJavaLocalRef<jobject> factory_object(
+ env, env->NewObject(factory_class.obj(), factory_constructor,
+ nullptr /* shared_context */,
+ false /* enable_intel_vp8_encoder */,
+ true /* enable_h264_high_profile */));
+ return JavaToNativeVideoEncoderFactory(env, factory_object.obj());
+}
+
+std::unique_ptr<VideoDecoderFactory> CreateAndroidDecoderFactory() {
+ JNIEnv* env = AttachCurrentThreadIfNeeded();
+ ScopedJavaLocalRef<jclass> factory_class =
+ GetClass(env, "org/webrtc/HardwareVideoDecoderFactory");
+ jmethodID factory_constructor = env->GetMethodID(
+ factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;)V");
+ ScopedJavaLocalRef<jobject> factory_object(
+ env, env->NewObject(factory_class.obj(), factory_constructor,
+ nullptr /* shared_context */));
+ return JavaToNativeVideoDecoderFactory(env, factory_object.obj());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h
new file mode 100644
index 0000000000..ad9cf35162
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/android_codec_factory_helper.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+void InitializeAndroidObjects();
+
+std::unique_ptr<VideoEncoderFactory> CreateAndroidEncoderFactory();
+std::unique_ptr<VideoDecoderFactory> CreateAndroidDecoderFactory();
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps
new file mode 100644
index 0000000000..6702195ca9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/empty-runtime-deps
@@ -0,0 +1 @@
+does-not-exist
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh
new file mode 100755
index 0000000000..28083b1808
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-instantiation-tests.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+if [ $# -ne 1 ]; then
+ echo "Usage: run-instantiation-tests.sh ADB-DEVICE-ID"
+ exit 1
+fi
+
+# Paths: update these based on your git checkout and gn output folder names.
+WEBRTC_DIR=$HOME/src/webrtc/src
+BUILD_DIR=$WEBRTC_DIR/out/Android_Release
+
+# Other settings.
+ADB=`which adb`
+SERIAL=$1
+TIMEOUT=7200
+
+# Ensure we are using the latest version.
+ninja -C $BUILD_DIR modules_tests
+
+# Transfer the required files by trying to run a test that doesn't exist.
+echo "===> Transferring required resources to device $1."
+$WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "DoesNotExist" \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path $BUILD_DIR/gen.runtime/modules/modules_tests__test_runner_script.runtime_deps \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose
+
+# Run all tests as separate test invocations.
+mkdir -p $SERIAL
+pushd $SERIAL
+$WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "*InstantiationTest*" \
+ --gtest_also_run_disabled_tests \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path ../empty-runtime-deps \
+ --test-launcher-retry-limit 0 \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose \
+ --num-retries 0 \
+ 2>&1 | tee -a instantiation-tests.log
+popd
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh
new file mode 100755
index 0000000000..25c971ba61
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/batch/run-videoprocessor-tests.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+if [ $# -ne 1 ]; then
+  echo "Usage: run-videoprocessor-tests.sh ADB-DEVICE-ID"
+ exit 1
+fi
+
+# Paths: update these based on your git checkout and gn output folder names.
+WEBRTC_DIR=$HOME/src/webrtc/src
+BUILD_DIR=$WEBRTC_DIR/out/Android_Release
+
+# Clips: update these to encode/decode other content.
+CLIPS=('Foreman')
+RESOLUTIONS=('128x96' '160x120' '176x144' '320x240' '352x288')
+FRAMERATES=(30)
+
+# Other settings.
+ADB=`which adb`
+SERIAL=$1
+TIMEOUT=7200
+
+# Ensure we are using the latest version.
+ninja -C $BUILD_DIR modules_tests
+
+# Transfer the required files by trying to run a test that doesn't exist.
+echo "===> Transferring required resources to device $1."
+$WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "DoesNotExist" \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path $BUILD_DIR/gen.runtime/modules/modules_tests__test_runner_script.runtime_deps \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose
+
+# Run all tests as separate test invocations.
+mkdir -p $SERIAL
+pushd $SERIAL
+for clip in "${CLIPS[@]}"; do
+ for resolution in "${RESOLUTIONS[@]}"; do
+ for framerate in "${FRAMERATES[@]}"; do
+ test_name="${clip}_${resolution}_${framerate}"
+ log_name="${test_name}.log"
+
+ echo "===> Running ${test_name} on device $1."
+
+ $WEBRTC_DIR/build/android/test_runner.py gtest \
+ --output-directory $BUILD_DIR \
+ --suite modules_tests \
+ --gtest_filter "CodecSettings/*${test_name}*" \
+ --shard-timeout $TIMEOUT \
+ --runtime-deps-path ../empty-runtime-deps \
+ --test-launcher-retry-limit 0 \
+ --adb-path $ADB \
+ --device $SERIAL \
+ --verbose \
+ 2>&1 | tee -a ${log_name}
+ done
+ done
+done
+popd
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc
new file mode 100644
index 0000000000..899826eee4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_type.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+class EncoderCallback : public EncodedImageCallback {
+ public:
+ explicit EncoderCallback(
+ std::vector<EncodedVideoFrameProducer::EncodedFrame>& output_frames)
+ : output_frames_(output_frames) {}
+
+ private:
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ output_frames_.push_back({encoded_image, *codec_specific_info});
+ return Result(Result::Error::OK);
+ }
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame>& output_frames_;
+};
+
+} // namespace
+
+std::vector<EncodedVideoFrameProducer::EncodedFrame>
+EncodedVideoFrameProducer::Encode() {
+ std::unique_ptr<test::FrameGeneratorInterface> frame_buffer_generator =
+ test::CreateSquareFrameGenerator(
+ resolution_.Width(), resolution_.Height(),
+ test::FrameGeneratorInterface::OutputType::kI420, absl::nullopt);
+
+ std::vector<EncodedFrame> encoded_frames;
+ EncoderCallback encoder_callback(encoded_frames);
+ RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(&encoder_callback),
+ WEBRTC_VIDEO_CODEC_OK);
+
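+  // The RTP video clock runs at 90 kHz, so each frame advances the timestamp
+  // by 90000 / fps ticks.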
+ uint32_t rtp_tick = 90000 / framerate_fps_;
+ for (int i = 0; i < num_input_frames_; ++i) {
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(frame_buffer_generator->NextFrame().buffer)
+ .set_timestamp_rtp(rtp_timestamp_)
+ .build();
+ rtp_timestamp_ += rtp_tick;
+ RTC_CHECK_EQ(encoder_.Encode(frame, &next_frame_type_),
+ WEBRTC_VIDEO_CODEC_OK);
+ next_frame_type_[0] = VideoFrameType::kVideoFrameDelta;
+ }
+
+ RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(nullptr),
+ WEBRTC_VIDEO_CODEC_OK);
+ return encoded_frames;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h
new file mode 100644
index 0000000000..04f4a64950
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/encoded_video_frame_producer.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// Wrapper around VideoEncoder::Encode for convenient input (generates frames)
+// and output (returns encoded frames instead of passing them to a callback).
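+//
+// Example (sketch; assumes `encoder` is an initialized VideoEncoder):
+//   std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+//       EncodedVideoFrameProducer(encoder)
+//           .SetNumInputFrames(8)
+//           .SetResolution({320, 180})
+//           .Encode();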
+class EncodedVideoFrameProducer {
+ public:
+ struct EncodedFrame {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_specific_info;
+ };
+
+  // `encoder` should be initialized, but shouldn't have an encode complete
+  // callback registered yet.
+ explicit EncodedVideoFrameProducer(VideoEncoder& encoder)
+ : encoder_(encoder) {}
+ EncodedVideoFrameProducer(const EncodedVideoFrameProducer&) = delete;
+ EncodedVideoFrameProducer& operator=(const EncodedVideoFrameProducer&) =
+ delete;
+
+ // Number of the input frames to pass to the encoder.
+ EncodedVideoFrameProducer& SetNumInputFrames(int value);
+ // Encode next frame as key frame.
+ EncodedVideoFrameProducer& ForceKeyFrame();
+ // Resolution of the input frames.
+ EncodedVideoFrameProducer& SetResolution(RenderResolution value);
+
+ EncodedVideoFrameProducer& SetFramerateFps(int value);
+
+ EncodedVideoFrameProducer& SetRtpTimestamp(uint32_t value);
+
+  // Generates input video frames and encodes them with the `encoder` provided
+  // in the constructor. Returns the frames passed to `OnEncodedImage` by
+  // registering an `EncodedImageCallback` underneath.
+ std::vector<EncodedFrame> Encode();
+
+ private:
+ VideoEncoder& encoder_;
+
+ uint32_t rtp_timestamp_ = 1000;
+ int num_input_frames_ = 1;
+ int framerate_fps_ = 30;
+ RenderResolution resolution_ = {320, 180};
+ std::vector<VideoFrameType> next_frame_type_ = {
+ VideoFrameType::kVideoFrameKey};
+};
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetNumInputFrames(
+ int value) {
+ RTC_DCHECK_GT(value, 0);
+ num_input_frames_ = value;
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::ForceKeyFrame() {
+ next_frame_type_ = {VideoFrameType::kVideoFrameKey};
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetResolution(
+ RenderResolution value) {
+ resolution_ = value;
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetFramerateFps(
+ int value) {
+ RTC_DCHECK_GT(value, 0);
+ framerate_fps_ = value;
+ return *this;
+}
+
+inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetRtpTimestamp(
+ uint32_t value) {
+ rtp_timestamp_ = value;
+ return *this;
+}
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h
new file mode 100644
index 0000000000..475d0fdd08
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+std::unique_ptr<VideoEncoderFactory> CreateObjCEncoderFactory();
+std::unique_ptr<VideoDecoderFactory> CreateObjCDecoderFactory();
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm
new file mode 100644
index 0000000000..ed82376251
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/objc_codec_factory_helper.mm
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
+
+#import "sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h"
+#import "sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h"
+#include "sdk/objc/native/api/video_decoder_factory.h"
+#include "sdk/objc/native/api/video_encoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+std::unique_ptr<VideoEncoderFactory> CreateObjCEncoderFactory() {
+ return ObjCToNativeVideoEncoderFactory([[RTC_OBJC_TYPE(RTCVideoEncoderFactoryH264) alloc] init]);
+}
+
+std::unique_ptr<VideoDecoderFactory> CreateObjCDecoderFactory() {
+ return ObjCToNativeVideoDecoderFactory([[RTC_OBJC_TYPE(RTCVideoDecoderFactoryH264) alloc] init]);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py b/third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py
new file mode 100755
index 0000000000..29e2d6f65a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py
@@ -0,0 +1,438 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Plots statistics from WebRTC integration test logs.
+
+Usage: $ python plot_webrtc_test_logs.py filename.txt
+"""
+
+import re
+import sys
+
+import matplotlib.pyplot as plt
+import numpy
+
+# Log events.
+EVENT_START = 'RUN ] CodecSettings/VideoCodecTestParameterized.'
+EVENT_END = 'OK ] CodecSettings/VideoCodecTestParameterized.'
+
+# Metrics to plot, tuple: (name to parse in file, label to use when plotting).
+WIDTH = ('width', 'width')
+HEIGHT = ('height', 'height')
+FILENAME = ('filename', 'clip')
+CODEC_TYPE = ('codec_type', 'Codec')
+ENCODER_IMPLEMENTATION_NAME = ('enc_impl_name', 'enc name')
+DECODER_IMPLEMENTATION_NAME = ('dec_impl_name', 'dec name')
+CODEC_IMPLEMENTATION_NAME = ('codec_impl_name', 'codec name')
+CORES = ('num_cores', 'CPU cores used')
+DENOISING = ('denoising', 'denoising')
+RESILIENCE = ('resilience', 'resilience')
+ERROR_CONCEALMENT = ('error_concealment', 'error concealment')
+CPU_USAGE = ('cpu_usage_percent', 'CPU usage (%)')
+BITRATE = ('target_bitrate_kbps', 'target bitrate (kbps)')
+FRAMERATE = ('input_framerate_fps', 'fps')
+QP = ('avg_qp', 'QP avg')
+PSNR = ('avg_psnr', 'PSNR (dB)')
+SSIM = ('avg_ssim', 'SSIM')
+ENC_BITRATE = ('bitrate_kbps', 'encoded bitrate (kbps)')
+NUM_FRAMES = ('num_input_frames', 'num frames')
+NUM_DROPPED_FRAMES = ('num_dropped_frames', 'num dropped frames')
+TIME_TO_TARGET = ('time_to_reach_target_bitrate_sec',
+ 'time to reach target rate (sec)')
+ENCODE_SPEED_FPS = ('enc_speed_fps', 'encode speed (fps)')
+DECODE_SPEED_FPS = ('dec_speed_fps', 'decode speed (fps)')
+AVG_KEY_FRAME_SIZE = ('avg_key_frame_size_bytes', 'avg key frame size (bytes)')
+AVG_DELTA_FRAME_SIZE = ('avg_delta_frame_size_bytes',
+ 'avg delta frame size (bytes)')
+
+# Settings.
+SETTINGS = [
+ WIDTH,
+ HEIGHT,
+ FILENAME,
+ NUM_FRAMES,
+]
+
+# Settings, options for x-axis.
+X_SETTINGS = [
+ CORES,
+ FRAMERATE,
+ DENOISING,
+ RESILIENCE,
+ ERROR_CONCEALMENT,
+ BITRATE, # TODO(asapersson): Needs to be last.
+]
+
+# Settings, options for subplots.
+SUBPLOT_SETTINGS = [
+ CODEC_TYPE,
+ ENCODER_IMPLEMENTATION_NAME,
+ DECODER_IMPLEMENTATION_NAME,
+ CODEC_IMPLEMENTATION_NAME,
+] + X_SETTINGS
+
+# Results.
+RESULTS = [
+ PSNR,
+ SSIM,
+ ENC_BITRATE,
+ NUM_DROPPED_FRAMES,
+ TIME_TO_TARGET,
+ ENCODE_SPEED_FPS,
+ DECODE_SPEED_FPS,
+ QP,
+ CPU_USAGE,
+ AVG_KEY_FRAME_SIZE,
+ AVG_DELTA_FRAME_SIZE,
+]
+
+METRICS_TO_PARSE = SETTINGS + SUBPLOT_SETTINGS + RESULTS
+
+Y_METRICS = [res[1] for res in RESULTS]
+
+# Parameters for plotting.
+FIG_SIZE_SCALE_FACTOR_X = 1.6
+FIG_SIZE_SCALE_FACTOR_Y = 1.8
+GRID_COLOR = [0.45, 0.45, 0.45]
+
+
+def ParseSetting(filename, setting):
+ """Parses setting from file.
+
+ Args:
+ filename: The name of the file.
+ setting: Name of setting to parse (e.g. width).
+
+ Returns:
+    A list holding parsed settings, e.g. ['width: 128.0', 'width: 160.0'].
+  """
+
+ settings = []
+
+ settings_file = open(filename)
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_START, line):
+ # Parse event.
+ parsed = {}
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_END, line):
+ # Add parsed setting to list.
+ if setting in parsed:
+ s = setting + ': ' + str(parsed[setting])
+ if s not in settings:
+ settings.append(s)
+ break
+
+ TryFindMetric(parsed, line)
+
+ settings_file.close()
+ return settings
+
+
+def ParseMetrics(filename, setting1, setting2):
+ """Parses metrics from file.
+
+ Args:
+ filename: The name of the file.
+ setting1: First setting for sorting metrics (e.g. width).
+ setting2: Second setting for sorting metrics (e.g. CPU cores used).
+
+ Returns:
+ A dictionary holding parsed metrics.
+
+ For example:
+ metrics[key1][key2][measurement]
+
+ metrics = {
+ "width: 352": {
+ "CPU cores used: 1.0": {
+ "encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ "CPU cores used: 2.0": {
+ "encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ },
+ "width: 176": {
+ "CPU cores used: 1.0": {
+ "encode time (us)": [0.857897, 0.91608, 0.959173, 0.971116, 0.980961],
+ "PSNR (dB)": [30.243646, 33.375592, 37.574387, 39.42184, 41.437897],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ }
+  }
+  """
+
+ metrics = {}
+
+ # Parse events.
+ settings_file = open(filename)
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_START, line):
+ # Parse event.
+ parsed = {}
+ while True:
+ line = settings_file.readline()
+ if not line:
+ break
+ if re.search(r'%s' % EVENT_END, line):
+ # Add parsed values to metrics.
+ key1 = setting1 + ': ' + str(parsed[setting1])
+ key2 = setting2 + ': ' + str(parsed[setting2])
+ if key1 not in metrics:
+ metrics[key1] = {}
+ if key2 not in metrics[key1]:
+ metrics[key1][key2] = {}
+
+ for label in parsed:
+ if label not in metrics[key1][key2]:
+ metrics[key1][key2][label] = []
+ metrics[key1][key2][label].append(parsed[label])
+
+ break
+
+ TryFindMetric(parsed, line)
+
+ settings_file.close()
+ return metrics
+
+
+def TryFindMetric(parsed, line):
+ for metric in METRICS_TO_PARSE:
+ name = metric[0]
+ label = metric[1]
+ if re.search(r'%s' % name, line):
+ found, value = GetMetric(name, line)
+ if found:
+ parsed[label] = value
+ return
+
+
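+# Extracts the value following `name` in `string`: first tries a numeric match
+# (e.g. "bitrate = 98.8253"), then falls back to a single alphanumeric token
+# (e.g. "codec_type : VP8"). Returns a (found, value) tuple.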
+def GetMetric(name, string):
+ # Float (e.g. bitrate = 98.8253).
+ pattern = r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name
+ m = re.search(r'%s' % pattern, string)
+ if m is not None:
+ return StringToFloat(m.group(1))
+
+ # Alphanumeric characters (e.g. codec type : VP8).
+ pattern = r'%s\s*[:=]\s*(\w+)' % name
+ m = re.search(r'%s' % pattern, string)
+ if m is not None:
+ return True, m.group(1)
+
+ return False, -1
+
+
+def StringToFloat(value):
+ try:
+ value = float(value)
+ except ValueError:
+    print("Not a float, skipped %s" % value)
+ return False, -1
+
+ return True, value
+
+
+def Plot(y_metric, x_metric, metrics):
+ """Plots y_metric vs x_metric per key in metrics.
+
+ For example:
+ y_metric = 'PSNR (dB)'
+ x_metric = 'bitrate (kbps)'
+ metrics = {
+ "CPU cores used: 1.0": {
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ "CPU cores used: 2.0": {
+ "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
+ "bitrate (kbps)": [50, 100, 300, 500, 1000]
+ },
+ }
+ """
+ for key in sorted(metrics):
+ data = metrics[key]
+ if y_metric not in data:
+      print("Failed to find metric: %s" % y_metric)
+ continue
+
+ y = numpy.array(data[y_metric])
+ x = numpy.array(data[x_metric])
+ if len(y) != len(x):
+      print("Length mismatch for %s, %s" % (y, x))
+ continue
+
+ label = y_metric + ' - ' + str(key)
+
+ plt.plot(x,
+ y,
+ label=label,
+ linewidth=1.5,
+ marker='o',
+ markersize=5,
+ markeredgewidth=0.0)
+
+
+def PlotFigure(settings, y_metrics, x_metric, metrics, title):
+ """Plots metrics in y_metrics list. One figure is plotted and each entry
+ in the list is plotted in a subplot (and sorted per settings).
+
+ For example:
+ settings = ['width: 128.0', 'width: 160.0']. Sort subplot per setting.
+ y_metrics = ['PSNR (dB)', 'PSNR (dB)']. Metric to plot per subplot.
+ x_metric = 'bitrate (kbps)'
+
+ """
+
+ plt.figure()
+ plt.suptitle(title, fontsize='large', fontweight='bold')
+ settings.sort()
+ rows = len(settings)
+ cols = 1
+ pos = 1
+ while pos <= rows:
+ plt.rc('grid', color=GRID_COLOR)
+ ax = plt.subplot(rows, cols, pos)
+ plt.grid()
+ plt.setp(ax.get_xticklabels(), visible=(pos == rows), fontsize='large')
+ plt.setp(ax.get_yticklabels(), fontsize='large')
+ setting = settings[pos - 1]
+ Plot(y_metrics[pos - 1], x_metric, metrics[setting])
+ if setting.startswith(WIDTH[1]):
+ plt.title(setting, fontsize='medium')
+ plt.legend(fontsize='large', loc='best')
+ pos += 1
+
+ plt.xlabel(x_metric, fontsize='large')
+ plt.subplots_adjust(left=0.06,
+ right=0.98,
+ bottom=0.05,
+ top=0.94,
+ hspace=0.08)
+
+
+def GetTitle(filename, setting):
+ title = ''
+ if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]:
+ codec_types = ParseSetting(filename, CODEC_TYPE[1])
+ for i in range(0, len(codec_types)):
+ title += codec_types[i] + ', '
+
+ if setting != CORES[1]:
+ cores = ParseSetting(filename, CORES[1])
+ for i in range(0, len(cores)):
+ title += cores[i].split('.')[0] + ', '
+
+ if setting != FRAMERATE[1]:
+ framerate = ParseSetting(filename, FRAMERATE[1])
+ for i in range(0, len(framerate)):
+ title += framerate[i].split('.')[0] + ', '
+
+ if (setting != CODEC_IMPLEMENTATION_NAME[1]
+ and setting != ENCODER_IMPLEMENTATION_NAME[1]):
+ enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1])
+ for i in range(0, len(enc_names)):
+ title += enc_names[i] + ', '
+
+ if (setting != CODEC_IMPLEMENTATION_NAME[1]
+ and setting != DECODER_IMPLEMENTATION_NAME[1]):
+ dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1])
+ for i in range(0, len(dec_names)):
+ title += dec_names[i] + ', '
+
+ filenames = ParseSetting(filename, FILENAME[1])
+ title += filenames[0].split('_')[0]
+
+ num_frames = ParseSetting(filename, NUM_FRAMES[1])
+ for i in range(0, len(num_frames)):
+ title += ' (' + num_frames[i].split('.')[0] + ')'
+
+ return title
+
+
+def ToString(input_list):
+ return ToStringWithoutMetric(input_list, ('', ''))
+
+
+def ToStringWithoutMetric(input_list, metric):
+ i = 1
+ output_str = ""
+ for m in input_list:
+ if m != metric:
+ output_str = output_str + ("%s. %s\n" % (i, m[1]))
+ i += 1
+ return output_str
+
+
+def GetIdx(text_list):
+  return int(input(text_list)) - 1
+
+
+def main():
+ filename = sys.argv[1]
+
+ # Setup.
+ idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS))
+ if idx_metric == -1:
+ # Plot all metrics. One subplot for each metric.
+ # Per subplot: metric vs bitrate (per resolution).
+ cores = ParseSetting(filename, CORES[1])
+ setting1 = CORES[1]
+ setting2 = WIDTH[1]
+ sub_keys = [cores[0]] * len(Y_METRICS)
+ y_metrics = Y_METRICS
+ x_metric = BITRATE[1]
+ else:
+ resolutions = ParseSetting(filename, WIDTH[1])
+ idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS))
+ if X_SETTINGS[idx] == BITRATE:
+ idx = GetIdx("Plot per:\n%s" %
+ ToStringWithoutMetric(SUBPLOT_SETTINGS, BITRATE))
+ idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx])
+ # Plot one metric. One subplot for each resolution.
+ # Per subplot: metric vs bitrate (per setting).
+ setting1 = WIDTH[1]
+ setting2 = METRICS_TO_PARSE[idx_setting][1]
+ sub_keys = resolutions
+ y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
+ x_metric = BITRATE[1]
+ else:
+ # Plot one metric. One subplot for each resolution.
+ # Per subplot: metric vs setting (per bitrate).
+ setting1 = WIDTH[1]
+ setting2 = BITRATE[1]
+ sub_keys = resolutions
+ y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
+ x_metric = X_SETTINGS[idx][1]
+
+ metrics = ParseMetrics(filename, setting1, setting2)
+
+ # Stretch fig size.
+ figsize = plt.rcParams["figure.figsize"]
+ figsize[0] *= FIG_SIZE_SCALE_FACTOR_X
+ figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y
+ plt.rcParams["figure.figsize"] = figsize
+
+ PlotFigure(sub_keys, y_metrics, x_metric, metrics,
+ GetTitle(filename, setting2))
+
+ plt.show()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.cc
new file mode 100644
index 0000000000..50af417bcf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/video_codec_analyzer.h"
+
+#include <memory>
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/test/video_codec_tester.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/time_utils.h"
+#include "third_party/libyuv/include/libyuv/compare.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+struct Psnr {
+ double y;
+ double u;
+ double v;
+ double yuv;
+};
+
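+// Computes per-plane and combined PSNR from summed squared errors. In I420
+// the chroma planes are quarter-size, hence the num_y_samples / 4 counts.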
+Psnr CalcPsnr(const I420BufferInterface& ref_buffer,
+ const I420BufferInterface& dec_buffer) {
+ RTC_CHECK_EQ(ref_buffer.width(), dec_buffer.width());
+ RTC_CHECK_EQ(ref_buffer.height(), dec_buffer.height());
+
+ uint64_t sse_y = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataY(), dec_buffer.StrideY(), ref_buffer.DataY(),
+ ref_buffer.StrideY(), dec_buffer.width(), dec_buffer.height());
+
+ uint64_t sse_u = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataU(), dec_buffer.StrideU(), ref_buffer.DataU(),
+ ref_buffer.StrideU(), dec_buffer.width() / 2, dec_buffer.height() / 2);
+
+ uint64_t sse_v = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataV(), dec_buffer.StrideV(), ref_buffer.DataV(),
+ ref_buffer.StrideV(), dec_buffer.width() / 2, dec_buffer.height() / 2);
+
+ int num_y_samples = dec_buffer.width() * dec_buffer.height();
+ Psnr psnr;
+ psnr.y = libyuv::SumSquareErrorToPsnr(sse_y, num_y_samples);
+ psnr.u = libyuv::SumSquareErrorToPsnr(sse_u, num_y_samples / 4);
+ psnr.v = libyuv::SumSquareErrorToPsnr(sse_v, num_y_samples / 4);
+ psnr.yuv = libyuv::SumSquareErrorToPsnr(sse_y + sse_u + sse_v,
+ num_y_samples + num_y_samples / 2);
+ return psnr;
+}
+
+} // namespace
+
+VideoCodecAnalyzer::VideoCodecAnalyzer(
+ rtc::TaskQueue& task_queue,
+ ReferenceVideoSource* reference_video_source)
+ : task_queue_(task_queue), reference_video_source_(reference_video_source) {
+ sequence_checker_.Detach();
+}
+
+void VideoCodecAnalyzer::StartEncode(const VideoFrame& input_frame) {
+ int64_t encode_started_ns = rtc::TimeNanos();
+ task_queue_.PostTask(
+ [this, timestamp_rtp = input_frame.timestamp(), encode_started_ns]() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoCodecTestStats::FrameStatistics* fs =
+ stats_.GetOrAddFrame(timestamp_rtp, /*spatial_idx=*/0);
+ fs->encode_start_ns = encode_started_ns;
+ });
+}
+
+void VideoCodecAnalyzer::FinishEncode(const EncodedImage& frame) {
+ int64_t encode_finished_ns = rtc::TimeNanos();
+
+ task_queue_.PostTask([this, timestamp_rtp = frame.Timestamp(),
+ spatial_idx = frame.SpatialIndex().value_or(0),
+ temporal_idx = frame.TemporalIndex().value_or(0),
+ frame_type = frame._frameType, qp = frame.qp_,
+ frame_size_bytes = frame.size(), encode_finished_ns]() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoCodecTestStats::FrameStatistics* fs =
+ stats_.GetOrAddFrame(timestamp_rtp, spatial_idx);
+ VideoCodecTestStats::FrameStatistics* fs_base =
+ stats_.GetOrAddFrame(timestamp_rtp, 0);
+
+ fs->encode_start_ns = fs_base->encode_start_ns;
+ fs->spatial_idx = spatial_idx;
+ fs->temporal_idx = temporal_idx;
+ fs->frame_type = frame_type;
+ fs->qp = qp;
+
+ fs->encode_time_us = (encode_finished_ns - fs->encode_start_ns) /
+ rtc::kNumNanosecsPerMicrosec;
+ fs->length_bytes = frame_size_bytes;
+
+ fs->encoding_successful = true;
+ });
+}
+
+void VideoCodecAnalyzer::StartDecode(const EncodedImage& frame) {
+ int64_t decode_start_ns = rtc::TimeNanos();
+ task_queue_.PostTask([this, timestamp_rtp = frame.Timestamp(),
+ spatial_idx = frame.SpatialIndex().value_or(0),
+ frame_size_bytes = frame.size(), decode_start_ns]() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoCodecTestStats::FrameStatistics* fs =
+ stats_.GetOrAddFrame(timestamp_rtp, spatial_idx);
+ if (fs->length_bytes == 0) {
+      // In an encode-decode test the frame size is set in FinishEncode(); in
+      // a decode-only test, set it here.
+ fs->length_bytes = frame_size_bytes;
+ }
+ fs->decode_start_ns = decode_start_ns;
+ });
+}
+
+void VideoCodecAnalyzer::FinishDecode(const VideoFrame& frame,
+ int spatial_idx) {
+ int64_t decode_finished_ns = rtc::TimeNanos();
+ task_queue_.PostTask([this, timestamp_rtp = frame.timestamp(), spatial_idx,
+ width = frame.width(), height = frame.height(),
+ decode_finished_ns]() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoCodecTestStats::FrameStatistics* fs =
+ stats_.GetFrameWithTimestamp(timestamp_rtp, spatial_idx);
+ fs->decode_time_us = (decode_finished_ns - fs->decode_start_ns) /
+ rtc::kNumNanosecsPerMicrosec;
+ fs->decoded_width = width;
+ fs->decoded_height = height;
+ fs->decoding_successful = true;
+ });
+
+ if (reference_video_source_ != nullptr) {
+    // Copy the hardware-backed frame into main memory to release output
+    // buffers, whose number may be limited in hardware decoders.
+ rtc::scoped_refptr<I420BufferInterface> decoded_buffer =
+ frame.video_frame_buffer()->ToI420();
+
+ task_queue_.PostTask([this, decoded_buffer,
+ timestamp_rtp = frame.timestamp(), spatial_idx]() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoFrame ref_frame = reference_video_source_->GetFrame(
+ timestamp_rtp, {.width = decoded_buffer->width(),
+ .height = decoded_buffer->height()});
+ rtc::scoped_refptr<I420BufferInterface> ref_buffer =
+ ref_frame.video_frame_buffer()->ToI420();
+
+ Psnr psnr = CalcPsnr(*decoded_buffer, *ref_buffer);
+ VideoCodecTestStats::FrameStatistics* fs =
+ this->stats_.GetFrameWithTimestamp(timestamp_rtp, spatial_idx);
+ fs->psnr_y = static_cast<float>(psnr.y);
+ fs->psnr_u = static_cast<float>(psnr.u);
+ fs->psnr_v = static_cast<float>(psnr.v);
+ fs->psnr = static_cast<float>(psnr.yuv);
+
+ fs->quality_analysis_successful = true;
+ });
+ }
+}
+
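+// Snapshots the collected stats on the task queue and blocks until the copy
+// is ready, ensuring previously posted updates are applied first.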
+std::unique_ptr<VideoCodecTestStats> VideoCodecAnalyzer::GetStats() {
+ std::unique_ptr<VideoCodecTestStats> stats;
+ rtc::Event ready;
+ task_queue_.PostTask([this, &stats, &ready]() mutable {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ stats.reset(new VideoCodecTestStatsImpl(stats_));
+ ready.Set();
+ });
+ ready.Wait(rtc::Event::kForever);
+ return stats;
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.h b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.h
new file mode 100644
index 0000000000..63a864e810
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_ANALYZER_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_ANALYZER_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/video/encoded_image.h"
+#include "api/video/resolution.h"
+#include "api/video/video_frame.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_queue_for_test.h"
+
+namespace webrtc {
+namespace test {
+
+// Analyzer measures and collects metrics necessary for evaluation of video
+// codec quality and performance. This class is thread-safe.
+class VideoCodecAnalyzer {
+ public:
+ // An interface that provides reference frames for spatial quality analysis.
+ class ReferenceVideoSource {
+ public:
+ virtual ~ReferenceVideoSource() = default;
+
+ virtual VideoFrame GetFrame(uint32_t timestamp_rtp,
+ Resolution resolution) = 0;
+ };
+
+ VideoCodecAnalyzer(rtc::TaskQueue& task_queue,
+ ReferenceVideoSource* reference_video_source = nullptr);
+
+ void StartEncode(const VideoFrame& frame);
+
+ void FinishEncode(const EncodedImage& frame);
+
+ void StartDecode(const EncodedImage& frame);
+
+ void FinishDecode(const VideoFrame& frame, int spatial_idx);
+
+ std::unique_ptr<VideoCodecTestStats> GetStats();
+
+ protected:
+ rtc::TaskQueue& task_queue_;
+ ReferenceVideoSource* const reference_video_source_;
+ VideoCodecTestStatsImpl stats_ RTC_GUARDED_BY(sequence_checker_);
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_ANALYZER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer_unittest.cc
new file mode 100644
index 0000000000..3f9de6dac2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_analyzer_unittest.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/video_codec_analyzer.h"
+
+#include "absl/types/optional.h"
+#include "api/video/i420_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "third_party/libyuv/include/libyuv/planar_functions.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+using ::testing::Return;
+using ::testing::Values;
+
+const size_t kTimestamp = 3000;
+const size_t kSpatialIdx = 2;
+
+class MockReferenceVideoSource
+ : public VideoCodecAnalyzer::ReferenceVideoSource {
+ public:
+ MOCK_METHOD(VideoFrame, GetFrame, (uint32_t, Resolution), (override));
+};
+
+VideoFrame CreateVideoFrame(uint32_t timestamp_rtp,
+ uint8_t y = 0,
+ uint8_t u = 0,
+ uint8_t v = 0) {
+ rtc::scoped_refptr<I420Buffer> buffer(I420Buffer::Create(2, 2));
+
+ libyuv::I420Rect(buffer->MutableDataY(), buffer->StrideY(),
+ buffer->MutableDataU(), buffer->StrideU(),
+ buffer->MutableDataV(), buffer->StrideV(), 0, 0,
+ buffer->width(), buffer->height(), y, u, v);
+
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(timestamp_rtp)
+ .build();
+}
+
+EncodedImage CreateEncodedImage(uint32_t timestamp_rtp, int spatial_idx = 0) {
+ EncodedImage encoded_image;
+ encoded_image.SetTimestamp(timestamp_rtp);
+ encoded_image.SetSpatialIndex(spatial_idx);
+ return encoded_image;
+}
+} // namespace
+
+TEST(VideoCodecAnalyzerTest, EncodeStartedCreatesFrameStats) {
+ TaskQueueForTest task_queue;
+ VideoCodecAnalyzer analyzer(task_queue);
+ analyzer.StartEncode(CreateVideoFrame(kTimestamp));
+
+ auto fs = analyzer.GetStats()->GetFrameStatistics();
+ EXPECT_EQ(1u, fs.size());
+ EXPECT_EQ(fs[0].rtp_timestamp, kTimestamp);
+}
+
+TEST(VideoCodecAnalyzerTest, EncodeFinishedUpdatesFrameStats) {
+ TaskQueueForTest task_queue;
+ VideoCodecAnalyzer analyzer(task_queue);
+ analyzer.StartEncode(CreateVideoFrame(kTimestamp));
+
+ EncodedImage encoded_frame = CreateEncodedImage(kTimestamp, kSpatialIdx);
+ analyzer.FinishEncode(encoded_frame);
+
+ auto fs = analyzer.GetStats()->GetFrameStatistics();
+ EXPECT_EQ(2u, fs.size());
+ EXPECT_TRUE(fs[1].encoding_successful);
+}
+
+TEST(VideoCodecAnalyzerTest, DecodeStartedNoFrameStatsCreatesFrameStats) {
+ TaskQueueForTest task_queue;
+ VideoCodecAnalyzer analyzer(task_queue);
+ analyzer.StartDecode(CreateEncodedImage(kTimestamp, kSpatialIdx));
+
+ auto fs = analyzer.GetStats()->GetFrameStatistics();
+ EXPECT_EQ(1u, fs.size());
+ EXPECT_EQ(fs[0].rtp_timestamp, kTimestamp);
+}
+
+TEST(VideoCodecAnalyzerTest, DecodeStartedFrameStatsExistsReusesFrameStats) {
+ TaskQueueForTest task_queue;
+ VideoCodecAnalyzer analyzer(task_queue);
+ analyzer.StartEncode(CreateVideoFrame(kTimestamp));
+ analyzer.StartDecode(CreateEncodedImage(kTimestamp, /*spatial_idx=*/0));
+
+ auto fs = analyzer.GetStats()->GetFrameStatistics();
+ EXPECT_EQ(1u, fs.size());
+}
+
+TEST(VideoCodecAnalyzerTest, DecodeFinishedUpdatesFrameStats) {
+ TaskQueueForTest task_queue;
+ VideoCodecAnalyzer analyzer(task_queue);
+ analyzer.StartDecode(CreateEncodedImage(kTimestamp, kSpatialIdx));
+ VideoFrame decoded_frame = CreateVideoFrame(kTimestamp);
+ analyzer.FinishDecode(decoded_frame, kSpatialIdx);
+
+ auto fs = analyzer.GetStats()->GetFrameStatistics();
+ EXPECT_EQ(1u, fs.size());
+
+ EXPECT_TRUE(fs[0].decoding_successful);
+ EXPECT_EQ(static_cast<int>(fs[0].decoded_width), decoded_frame.width());
+ EXPECT_EQ(static_cast<int>(fs[0].decoded_height), decoded_frame.height());
+}
+
+TEST(VideoCodecAnalyzerTest, DecodeFinishedComputesPsnr) {
+ TaskQueueForTest task_queue;
+ MockReferenceVideoSource reference_video_source;
+ VideoCodecAnalyzer analyzer(task_queue, &reference_video_source);
+ analyzer.StartDecode(CreateEncodedImage(kTimestamp, kSpatialIdx));
+
+ EXPECT_CALL(reference_video_source, GetFrame)
+ .WillOnce(Return(CreateVideoFrame(kTimestamp, /*y=*/0,
+ /*u=*/0, /*v=*/0)));
+
+ analyzer.FinishDecode(
+      CreateVideoFrame(kTimestamp, /*y=*/1, /*u=*/2, /*v=*/3),
+ kSpatialIdx);
+
+ auto fs = analyzer.GetStats()->GetFrameStatistics();
+ EXPECT_EQ(1u, fs.size());
+
+ EXPECT_NEAR(fs[0].psnr_y, 48, 1);
+ EXPECT_NEAR(fs[0].psnr_u, 42, 1);
+ EXPECT_NEAR(fs[0].psnr_v, 38, 1);
+}
+
+} // namespace test
+} // namespace webrtc
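The PSNR expectations in `DecodeFinishedComputesPsnr` follow directly from the constant per-plane differences between the reference frame (y = u = v = 0) and the decoded frame (y = 1, u = 2, v = 3). For 8-bit video:

    PSNR = 10 * log10(255^2 / MSE)
    MSE_Y = 1^2 = 1  =>  PSNR_Y ≈ 48.1 dB
    MSE_U = 2^2 = 4  =>  PSNR_U ≈ 42.1 dB
    MSE_V = 3^2 = 9  =>  PSNR_V ≈ 38.6 dB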
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc
new file mode 100644
index 0000000000..bd4c8e07f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/video_codecs/video_codec.h"
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "api/test/create_video_codec_tester.h"
+#include "api/test/videocodec_test_stats.h"
+#include "api/units/data_rate.h"
+#include "api/units/frequency.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/resolution.h"
+#include "api/video_codecs/builtin_video_decoder_factory.h"
+#include "api/video_codecs/builtin_video_encoder_factory.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "media/base/media_constants.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+#include "test/testsupport/frame_reader.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+using ::testing::Combine;
+using ::testing::Values;
+using Layer = std::pair<int, int>;
+
+struct VideoInfo {
+ std::string name;
+ Resolution resolution;
+};
+
+struct CodecInfo {
+ std::string type;
+ std::string encoder;
+ std::string decoder;
+};
+
+struct EncodingSettings {
+ ScalabilityMode scalability_mode;
+ // Spatial layer resolution.
+ std::map<int, Resolution> resolution;
+ // Top temporal layer frame rate.
+ Frequency framerate;
+ // Bitrate of spatial and temporal layers.
+ std::map<Layer, DataRate> bitrate;
+};
+
+struct EncodingTestSettings {
+ std::string name;
+ int num_frames = 1;
+ std::map<int, EncodingSettings> frame_settings;
+};
+
+struct DecodingTestSettings {
+ std::string name;
+};
+
+struct QualityExpectations {
+ double min_apsnr_y;
+};
+
+struct EncodeDecodeTestParams {
+ CodecInfo codec;
+ VideoInfo video;
+ VideoCodecTester::EncoderSettings encoder_settings;
+ VideoCodecTester::DecoderSettings decoder_settings;
+ EncodingTestSettings encoding_settings;
+ DecodingTestSettings decoding_settings;
+ QualityExpectations quality_expectations;
+};
+
+const EncodingSettings kQvga64Kbps30Fps = {
+ .scalability_mode = ScalabilityMode::kL1T1,
+ .resolution = {{0, {.width = 320, .height = 180}}},
+ .framerate = Frequency::Hertz(30),
+ .bitrate = {{Layer(0, 0), DataRate::KilobitsPerSec(64)}}};
+
+const EncodingTestSettings kConstantRateQvga64Kbps30Fps = {
+ .name = "ConstantRateQvga64Kbps30Fps",
+ .num_frames = 300,
+ .frame_settings = {{/*frame_num=*/0, kQvga64Kbps30Fps}}};
+
+const QualityExpectations kLowQuality = {.min_apsnr_y = 30};
+
+const VideoInfo kFourPeople_1280x720_30 = {
+ .name = "FourPeople_1280x720_30",
+ .resolution = {.width = 1280, .height = 720}};
+
+const CodecInfo kLibvpxVp8 = {.type = "VP8",
+ .encoder = "libvpx",
+ .decoder = "libvpx"};
+
+const CodecInfo kLibvpxVp9 = {.type = "VP9",
+ .encoder = "libvpx",
+ .decoder = "libvpx"};
+
+const CodecInfo kOpenH264 = {.type = "H264",
+ .encoder = "openh264",
+ .decoder = "ffmpeg"};
+
+class TestRawVideoSource : public VideoCodecTester::RawVideoSource {
+ public:
+ static constexpr Frequency k90kHz = Frequency::Hertz(90000);
+
+ TestRawVideoSource(std::unique_ptr<FrameReader> frame_reader,
+ const EncodingTestSettings& test_settings)
+ : frame_reader_(std::move(frame_reader)),
+ test_settings_(test_settings),
+ frame_num_(0),
+ timestamp_rtp_(0) {
+ // Ensure settings for the first frame are provided.
+ RTC_CHECK_GT(test_settings_.frame_settings.size(), 0u);
+ RTC_CHECK_EQ(test_settings_.frame_settings.begin()->first, 0);
+ }
+
+  // Pulls the next frame. The frame RTP timestamp is set according to
+  // `EncodingSettings::framerate`.
+ absl::optional<VideoFrame> PullFrame() override {
+ if (frame_num_ >= test_settings_.num_frames) {
+ // End of stream.
+ return absl::nullopt;
+ }
+
+ EncodingSettings frame_settings =
+ std::prev(test_settings_.frame_settings.upper_bound(frame_num_))
+ ->second;
+
+ int pulled_frame;
+ auto buffer = frame_reader_->PullFrame(
+ &pulled_frame, frame_settings.resolution.rbegin()->second,
+ {.num = 30, .den = static_cast<int>(frame_settings.framerate.hertz())});
+ RTC_CHECK(buffer) << "Cannot pull frame " << frame_num_;
+
+ auto frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(timestamp_rtp_)
+ .build();
+
+ pulled_frames_[timestamp_rtp_] = pulled_frame;
+ timestamp_rtp_ += k90kHz / frame_settings.framerate;
+ ++frame_num_;
+
+ return frame;
+ }
+
+  // Reads the frame specified by `timestamp_rtp`, scales it to `resolution`
+  // and returns it. The frame with the given `timestamp_rtp` is expected to
+  // have been pulled before.
+ VideoFrame GetFrame(uint32_t timestamp_rtp, Resolution resolution) override {
+ RTC_CHECK(pulled_frames_.find(timestamp_rtp) != pulled_frames_.end())
+ << "Frame with RTP timestamp " << timestamp_rtp
+ << " was not pulled before";
+ auto buffer =
+ frame_reader_->ReadFrame(pulled_frames_[timestamp_rtp], resolution);
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(timestamp_rtp)
+ .build();
+ }
+
+ protected:
+ std::unique_ptr<FrameReader> frame_reader_;
+ const EncodingTestSettings& test_settings_;
+ int frame_num_;
+ uint32_t timestamp_rtp_;
+ std::map<uint32_t, int> pulled_frames_;
+};
+
+class TestEncoder : public VideoCodecTester::Encoder,
+ public EncodedImageCallback {
+ public:
+ TestEncoder(std::unique_ptr<VideoEncoder> encoder,
+ const CodecInfo& codec_info,
+ const std::map<int, EncodingSettings>& frame_settings)
+ : encoder_(std::move(encoder)),
+ codec_info_(codec_info),
+ frame_settings_(frame_settings),
+ frame_num_(0) {
+    // Ensure settings for the first frame are provided.
+ RTC_CHECK_GT(frame_settings_.size(), 0u);
+ RTC_CHECK_EQ(frame_settings_.begin()->first, 0);
+
+ encoder_->RegisterEncodeCompleteCallback(this);
+ }
+
+ void Encode(const VideoFrame& frame, EncodeCallback callback) override {
+ callbacks_[frame.timestamp()] = std::move(callback);
+
+ if (auto fs = frame_settings_.find(frame_num_);
+ fs != frame_settings_.end()) {
+ if (fs == frame_settings_.begin() ||
+ ConfigChanged(fs->second, std::prev(fs)->second)) {
+ Configure(fs->second);
+ }
+ if (fs == frame_settings_.begin() ||
+ RateChanged(fs->second, std::prev(fs)->second)) {
+ SetRates(fs->second);
+ }
+ }
+
+ int result = encoder_->Encode(frame, nullptr);
+ RTC_CHECK_EQ(result, WEBRTC_VIDEO_CODEC_OK);
+ ++frame_num_;
+ }
+
+ protected:
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ auto cb = callbacks_.find(encoded_image.Timestamp());
+ RTC_CHECK(cb != callbacks_.end());
+ cb->second(encoded_image);
+
+ callbacks_.erase(callbacks_.begin(), cb);
+ return Result(Result::Error::OK);
+ }
+
+ void Configure(const EncodingSettings& es) {
+ VideoCodec vc;
+ const Resolution& resolution = es.resolution.rbegin()->second;
+ vc.width = resolution.width;
+ vc.height = resolution.height;
+ const DataRate& bitrate = es.bitrate.rbegin()->second;
+ vc.startBitrate = bitrate.kbps();
+ vc.maxBitrate = bitrate.kbps();
+ vc.minBitrate = 0;
+ vc.maxFramerate = static_cast<uint32_t>(es.framerate.hertz());
+ vc.active = true;
+ vc.qpMax = 0;
+ vc.numberOfSimulcastStreams = 0;
+ vc.mode = webrtc::VideoCodecMode::kRealtimeVideo;
+ vc.SetFrameDropEnabled(true);
+
+ vc.codecType = PayloadStringToCodecType(codec_info_.type);
+ if (vc.codecType == kVideoCodecVP8) {
+ *(vc.VP8()) = VideoEncoder::GetDefaultVp8Settings();
+ } else if (vc.codecType == kVideoCodecVP9) {
+ *(vc.VP9()) = VideoEncoder::GetDefaultVp9Settings();
+ } else if (vc.codecType == kVideoCodecH264) {
+ *(vc.H264()) = VideoEncoder::GetDefaultH264Settings();
+ }
+
+ VideoEncoder::Settings ves(
+ VideoEncoder::Capabilities(/*loss_notification=*/false),
+ /*number_of_cores=*/1,
+ /*max_payload_size=*/1440);
+
+ int result = encoder_->InitEncode(&vc, ves);
+ RTC_CHECK_EQ(result, WEBRTC_VIDEO_CODEC_OK);
+ }
+
+ void SetRates(const EncodingSettings& es) {
+ VideoEncoder::RateControlParameters rc;
+ int num_spatial_layers =
+ ScalabilityModeToNumSpatialLayers(es.scalability_mode);
+    int num_temporal_layers =
+        ScalabilityModeToNumTemporalLayers(es.scalability_mode);
+ for (int sidx = 0; sidx < num_spatial_layers; ++sidx) {
+ for (int tidx = 0; tidx < num_temporal_layers; ++tidx) {
+ RTC_CHECK(es.bitrate.find(Layer(sidx, tidx)) != es.bitrate.end())
+ << "Bitrate for layer S=" << sidx << " T=" << tidx << " is not set";
+ rc.bitrate.SetBitrate(sidx, tidx,
+ es.bitrate.at(Layer(sidx, tidx)).bps());
+ }
+ }
+
+ rc.framerate_fps = es.framerate.millihertz() / 1000.0;
+ encoder_->SetRates(rc);
+ }
+
+ bool ConfigChanged(const EncodingSettings& es,
+ const EncodingSettings& prev_es) const {
+ return es.scalability_mode != prev_es.scalability_mode ||
+ es.resolution != prev_es.resolution;
+ }
+
+ bool RateChanged(const EncodingSettings& es,
+ const EncodingSettings& prev_es) const {
+ return es.bitrate != prev_es.bitrate || es.framerate != prev_es.framerate;
+ }
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ const CodecInfo& codec_info_;
+ const std::map<int, EncodingSettings>& frame_settings_;
+ int frame_num_;
+ std::map<uint32_t, EncodeCallback> callbacks_;
+};
+
+class TestDecoder : public VideoCodecTester::Decoder,
+ public DecodedImageCallback {
+ public:
+ TestDecoder(std::unique_ptr<VideoDecoder> decoder,
+ const CodecInfo& codec_info)
+ : decoder_(std::move(decoder)), codec_info_(codec_info), frame_num_(0) {
+ decoder_->RegisterDecodeCompleteCallback(this);
+ }
+ void Decode(const EncodedImage& frame, DecodeCallback callback) override {
+ callbacks_[frame.Timestamp()] = std::move(callback);
+
+ if (frame_num_ == 0) {
+ Configure();
+ }
+
+ decoder_->Decode(frame, /*missing_frames=*/false,
+ /*render_time_ms=*/0);
+ ++frame_num_;
+ }
+
+ void Configure() {
+ VideoDecoder::Settings ds;
+ ds.set_codec_type(PayloadStringToCodecType(codec_info_.type));
+ ds.set_number_of_cores(1);
+
+ bool result = decoder_->Configure(ds);
+ RTC_CHECK(result);
+ }
+
+ protected:
+ int Decoded(VideoFrame& decoded_frame) override {
+ auto cb = callbacks_.find(decoded_frame.timestamp());
+ RTC_CHECK(cb != callbacks_.end());
+ cb->second(decoded_frame);
+
+ callbacks_.erase(callbacks_.begin(), cb);
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ std::unique_ptr<VideoDecoder> decoder_;
+ const CodecInfo& codec_info_;
+ int frame_num_;
+ std::map<uint32_t, DecodeCallback> callbacks_;
+};
+
+std::unique_ptr<VideoCodecTester::Encoder> CreateEncoder(
+ const CodecInfo& codec_info,
+ const std::map<int, EncodingSettings>& frame_settings) {
+ auto factory = CreateBuiltinVideoEncoderFactory();
+ auto encoder = factory->CreateVideoEncoder(SdpVideoFormat(codec_info.type));
+ return std::make_unique<TestEncoder>(std::move(encoder), codec_info,
+ frame_settings);
+}
+
+std::unique_ptr<VideoCodecTester::Decoder> CreateDecoder(
+ const CodecInfo& codec_info) {
+ auto factory = CreateBuiltinVideoDecoderFactory();
+ auto decoder = factory->CreateVideoDecoder(SdpVideoFormat(codec_info.type));
+ return std::make_unique<TestDecoder>(std::move(decoder), codec_info);
+}
+
+} // namespace
+
+class EncodeDecodeTest
+ : public ::testing::TestWithParam<EncodeDecodeTestParams> {
+ public:
+ EncodeDecodeTest() : test_params_(GetParam()) {}
+
+ void SetUp() override {
+ std::unique_ptr<FrameReader> frame_reader =
+ CreateYuvFrameReader(ResourcePath(test_params_.video.name, "yuv"),
+ test_params_.video.resolution,
+ YuvFrameReaderImpl::RepeatMode::kPingPong);
+ video_source_ = std::make_unique<TestRawVideoSource>(
+ std::move(frame_reader), test_params_.encoding_settings);
+
+ encoder_ = CreateEncoder(test_params_.codec,
+ test_params_.encoding_settings.frame_settings);
+ decoder_ = CreateDecoder(test_params_.codec);
+
+ tester_ = CreateVideoCodecTester();
+ }
+
+ static std::string TestParametersToStr(
+ const ::testing::TestParamInfo<EncodeDecodeTest::ParamType>& info) {
+ return std::string(info.param.encoding_settings.name +
+ info.param.codec.type + info.param.codec.encoder +
+ info.param.codec.decoder);
+ }
+
+ protected:
+ EncodeDecodeTestParams test_params_;
+ std::unique_ptr<TestRawVideoSource> video_source_;
+ std::unique_ptr<VideoCodecTester::Encoder> encoder_;
+ std::unique_ptr<VideoCodecTester::Decoder> decoder_;
+ std::unique_ptr<VideoCodecTester> tester_;
+};
+
+TEST_P(EncodeDecodeTest, DISABLED_TestEncodeDecode) {
+ std::unique_ptr<VideoCodecTestStats> stats = tester_->RunEncodeDecodeTest(
+ std::move(video_source_), std::move(encoder_), std::move(decoder_),
+ test_params_.encoder_settings, test_params_.decoder_settings);
+
+ const auto& frame_settings = test_params_.encoding_settings.frame_settings;
+ for (auto fs = frame_settings.begin(); fs != frame_settings.end(); ++fs) {
+ int first_frame = fs->first;
+ int last_frame = std::next(fs) != frame_settings.end()
+ ? std::next(fs)->first - 1
+ : test_params_.encoding_settings.num_frames - 1;
+
+ const EncodingSettings& encoding_settings = fs->second;
+ auto metrics = stats->CalcVideoStatistic(
+ first_frame, last_frame, encoding_settings.bitrate.rbegin()->second,
+ encoding_settings.framerate);
+
+ EXPECT_GE(metrics.avg_psnr_y,
+ test_params_.quality_expectations.min_apsnr_y);
+ }
+}
+
+std::list<EncodeDecodeTestParams> ConstantRateTestParameters() {
+ std::list<EncodeDecodeTestParams> test_params;
+ std::vector<CodecInfo> codecs = {kLibvpxVp8};
+ std::vector<VideoInfo> videos = {kFourPeople_1280x720_30};
+ std::vector<std::pair<EncodingTestSettings, QualityExpectations>>
+ encoding_settings = {{kConstantRateQvga64Kbps30Fps, kLowQuality}};
+ for (const CodecInfo& codec : codecs) {
+ for (const VideoInfo& video : videos) {
+ for (const auto& es : encoding_settings) {
+ EncodeDecodeTestParams p;
+ p.codec = codec;
+ p.video = video;
+ p.encoding_settings = es.first;
+ p.quality_expectations = es.second;
+ test_params.push_back(p);
+ }
+ }
+ }
+ return test_params;
+}
+
+INSTANTIATE_TEST_SUITE_P(ConstantRate,
+ EncodeDecodeTest,
+ ::testing::ValuesIn(ConstantRateTestParameters()),
+ EncodeDecodeTest::TestParametersToStr);
+} // namespace test
+
+} // namespace webrtc
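Both `TestRawVideoSource::PullFrame` and `TestEncoder::Encode` above treat `frame_settings` as a step function: each entry applies from its key frame number until the next key. A short sketch of the lookup, with `settings_a` and `settings_b` as hypothetical values:

    std::map<int, EncodingSettings> frame_settings = {{0, settings_a},
                                                      {150, settings_b}};
    // upper_bound(frame_num) is the first entry with key > frame_num, so
    // std::prev() steps back to the entry in effect for this frame:
    // settings_a for frames 0..149, settings_b from frame 150 onward.
    const EncodingSettings& active =
        std::prev(frame_settings.upper_bound(frame_num))->second;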
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.cc
new file mode 100644
index 0000000000..3000c1adee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.cc
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/video_codec_tester_impl.h"
+
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "modules/video_coding/codecs/test/video_codec_analyzer.h"
+#include "rtc_base/event.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+using RawVideoSource = VideoCodecTester::RawVideoSource;
+using CodedVideoSource = VideoCodecTester::CodedVideoSource;
+using Decoder = VideoCodecTester::Decoder;
+using Encoder = VideoCodecTester::Encoder;
+using EncoderSettings = VideoCodecTester::EncoderSettings;
+using DecoderSettings = VideoCodecTester::DecoderSettings;
+using PacingSettings = VideoCodecTester::PacingSettings;
+using PacingMode = PacingSettings::PacingMode;
+
+constexpr Frequency k90kHz = Frequency::Hertz(90000);
+
+// A thread-safe wrapper for the video source, shared with the quality
+// analyzer, which reads reference frames from a separate thread.
+class SyncRawVideoSource : public VideoCodecAnalyzer::ReferenceVideoSource {
+ public:
+ explicit SyncRawVideoSource(std::unique_ptr<RawVideoSource> video_source)
+ : video_source_(std::move(video_source)) {}
+
+ absl::optional<VideoFrame> PullFrame() {
+ MutexLock lock(&mutex_);
+ return video_source_->PullFrame();
+ }
+
+ VideoFrame GetFrame(uint32_t timestamp_rtp, Resolution resolution) override {
+ MutexLock lock(&mutex_);
+ return video_source_->GetFrame(timestamp_rtp, resolution);
+ }
+
+ protected:
+ std::unique_ptr<RawVideoSource> video_source_ RTC_GUARDED_BY(mutex_);
+ Mutex mutex_;
+};
+
+// Pacer calculates the delay necessary to keep frame encode or decode calls
+// spaced from the previous call by the pacing time. `Delay` is expected to be
+// called as close as possible to posting the frame encode or decode task.
+// This class is not thread-safe.
+class Pacer {
+ public:
+ explicit Pacer(PacingSettings settings)
+ : settings_(settings), delay_(TimeDelta::Zero()) {}
+ TimeDelta Delay(Timestamp beat) {
+ if (settings_.mode == PacingMode::kNoPacing) {
+ return TimeDelta::Zero();
+ }
+
+ Timestamp now = Timestamp::Micros(rtc::TimeMicros());
+ if (prev_time_.has_value()) {
+ delay_ += PacingTime(beat);
+ delay_ -= (now - *prev_time_);
+ if (delay_.ns() < 0) {
+ delay_ = TimeDelta::Zero();
+ }
+ }
+
+ prev_beat_ = beat;
+ prev_time_ = now;
+ return delay_;
+ }
+
+ private:
+ TimeDelta PacingTime(Timestamp beat) {
+ if (settings_.mode == PacingMode::kRealTime) {
+ return beat - *prev_beat_;
+ }
+ RTC_CHECK_EQ(PacingMode::kConstantRate, settings_.mode);
+ return 1 / settings_.constant_rate;
+ }
+
+ PacingSettings settings_;
+ absl::optional<Timestamp> prev_beat_;
+ absl::optional<Timestamp> prev_time_;
+ TimeDelta delay_;
+};
+
+// Task queue that keeps the number of queued tasks below a certain limit. If
+// the limit is reached, posting the next task blocks until execution of a
+// previously posted task starts. This class is not thread-safe.
+class LimitedTaskQueue {
+ public:
+  // The codec tester reads frames from the video source in the main thread.
+  // Encoding and decoding are done in separate threads. If encoding or
+  // decoding is slow, the reading may run far ahead and buffer too many
+  // frames in memory. To prevent this, we limit the encoding/decoding queue
+  // size. When the queue is full, the main thread, and hence the reading of
+  // frames from the video source, is blocked until a previously posted
+  // encoding/decoding task starts.
+ static constexpr int kMaxTaskQueueSize = 3;
+
+ explicit LimitedTaskQueue(rtc::TaskQueue& task_queue)
+ : task_queue_(task_queue), queue_size_(0) {}
+
+ void PostDelayedTask(absl::AnyInvocable<void() &&> task, TimeDelta delay) {
+ ++queue_size_;
+ task_queue_.PostDelayedTask(
+ [this, task = std::move(task)]() mutable {
+ std::move(task)();
+ --queue_size_;
+ task_executed_.Set();
+ },
+ delay);
+
+ task_executed_.Reset();
+ if (queue_size_ > kMaxTaskQueueSize) {
+ task_executed_.Wait(rtc::Event::kForever);
+ }
+ RTC_CHECK(queue_size_ <= kMaxTaskQueueSize);
+ }
+
+ void WaitForPreviouslyPostedTasks() {
+ while (queue_size_ > 0) {
+ task_executed_.Wait(rtc::Event::kForever);
+ task_executed_.Reset();
+ }
+ }
+
+ rtc::TaskQueue& task_queue_;
+ std::atomic_int queue_size_;
+ rtc::Event task_executed_;
+};
+
+class TesterDecoder {
+ public:
+ TesterDecoder(std::unique_ptr<Decoder> decoder,
+ VideoCodecAnalyzer* analyzer,
+ const DecoderSettings& settings,
+ rtc::TaskQueue& task_queue)
+ : decoder_(std::move(decoder)),
+ analyzer_(analyzer),
+ settings_(settings),
+ pacer_(settings.pacing),
+ task_queue_(task_queue) {
+ RTC_CHECK(analyzer_) << "Analyzer must be provided";
+ }
+
+ void Decode(const EncodedImage& frame) {
+ Timestamp timestamp = Timestamp::Micros((frame.Timestamp() / k90kHz).us());
+
+ task_queue_.PostDelayedTask(
+ [this, frame] {
+ analyzer_->StartDecode(frame);
+ decoder_->Decode(frame, [this](const VideoFrame& decoded_frame) {
+ this->analyzer_->FinishDecode(decoded_frame, /*spatial_idx=*/0);
+ });
+ },
+ pacer_.Delay(timestamp));
+ }
+
+ void Flush() { task_queue_.WaitForPreviouslyPostedTasks(); }
+
+ protected:
+ std::unique_ptr<Decoder> decoder_;
+ VideoCodecAnalyzer* const analyzer_;
+ const DecoderSettings& settings_;
+ Pacer pacer_;
+ LimitedTaskQueue task_queue_;
+};
+
+class TesterEncoder {
+ public:
+ TesterEncoder(std::unique_ptr<Encoder> encoder,
+ TesterDecoder* decoder,
+ VideoCodecAnalyzer* analyzer,
+ const EncoderSettings& settings,
+ rtc::TaskQueue& task_queue)
+ : encoder_(std::move(encoder)),
+ decoder_(decoder),
+ analyzer_(analyzer),
+ settings_(settings),
+ pacer_(settings.pacing),
+ task_queue_(task_queue) {
+ RTC_CHECK(analyzer_) << "Analyzer must be provided";
+ }
+
+ void Encode(const VideoFrame& frame) {
+ Timestamp timestamp = Timestamp::Micros((frame.timestamp() / k90kHz).us());
+
+ task_queue_.PostDelayedTask(
+ [this, frame] {
+ analyzer_->StartEncode(frame);
+ encoder_->Encode(frame, [this](const EncodedImage& encoded_frame) {
+ this->analyzer_->FinishEncode(encoded_frame);
+ if (decoder_ != nullptr) {
+ this->decoder_->Decode(encoded_frame);
+ }
+ });
+ },
+ pacer_.Delay(timestamp));
+ }
+
+ void Flush() { task_queue_.WaitForPreviouslyPostedTasks(); }
+
+ protected:
+ std::unique_ptr<Encoder> encoder_;
+ TesterDecoder* const decoder_;
+ VideoCodecAnalyzer* const analyzer_;
+ const EncoderSettings& settings_;
+ Pacer pacer_;
+ LimitedTaskQueue task_queue_;
+};
+
+} // namespace
+
+VideoCodecTesterImpl::VideoCodecTesterImpl()
+ : VideoCodecTesterImpl(/*task_queue_factory=*/nullptr) {}
+
+VideoCodecTesterImpl::VideoCodecTesterImpl(TaskQueueFactory* task_queue_factory)
+ : task_queue_factory_(task_queue_factory) {
+ if (task_queue_factory_ == nullptr) {
+ owned_task_queue_factory_ = CreateDefaultTaskQueueFactory();
+ task_queue_factory_ = owned_task_queue_factory_.get();
+ }
+}
+
+std::unique_ptr<VideoCodecTestStats> VideoCodecTesterImpl::RunDecodeTest(
+ std::unique_ptr<CodedVideoSource> video_source,
+ std::unique_ptr<Decoder> decoder,
+ const DecoderSettings& decoder_settings) {
+ rtc::TaskQueue analyser_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Analyzer", TaskQueueFactory::Priority::NORMAL));
+ rtc::TaskQueue decoder_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Decoder", TaskQueueFactory::Priority::NORMAL));
+
+ VideoCodecAnalyzer perf_analyzer(analyser_task_queue);
+ TesterDecoder tester_decoder(std::move(decoder), &perf_analyzer,
+ decoder_settings, decoder_task_queue);
+
+ while (auto frame = video_source->PullFrame()) {
+ tester_decoder.Decode(*frame);
+ }
+
+ tester_decoder.Flush();
+
+ return perf_analyzer.GetStats();
+}
+
+std::unique_ptr<VideoCodecTestStats> VideoCodecTesterImpl::RunEncodeTest(
+ std::unique_ptr<RawVideoSource> video_source,
+ std::unique_ptr<Encoder> encoder,
+ const EncoderSettings& encoder_settings) {
+ rtc::TaskQueue analyser_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Analyzer", TaskQueueFactory::Priority::NORMAL));
+ rtc::TaskQueue encoder_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Encoder", TaskQueueFactory::Priority::NORMAL));
+
+ SyncRawVideoSource sync_source(std::move(video_source));
+ VideoCodecAnalyzer perf_analyzer(analyser_task_queue);
+ TesterEncoder tester_encoder(std::move(encoder), /*decoder=*/nullptr,
+ &perf_analyzer, encoder_settings,
+ encoder_task_queue);
+
+ while (auto frame = sync_source.PullFrame()) {
+ tester_encoder.Encode(*frame);
+ }
+
+ tester_encoder.Flush();
+
+ return perf_analyzer.GetStats();
+}
+
+std::unique_ptr<VideoCodecTestStats> VideoCodecTesterImpl::RunEncodeDecodeTest(
+ std::unique_ptr<RawVideoSource> video_source,
+ std::unique_ptr<Encoder> encoder,
+ std::unique_ptr<Decoder> decoder,
+ const EncoderSettings& encoder_settings,
+ const DecoderSettings& decoder_settings) {
+ rtc::TaskQueue analyser_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Analyzer", TaskQueueFactory::Priority::NORMAL));
+ rtc::TaskQueue decoder_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Decoder", TaskQueueFactory::Priority::NORMAL));
+ rtc::TaskQueue encoder_task_queue(task_queue_factory_->CreateTaskQueue(
+ "Encoder", TaskQueueFactory::Priority::NORMAL));
+
+ SyncRawVideoSource sync_source(std::move(video_source));
+ VideoCodecAnalyzer perf_analyzer(analyser_task_queue, &sync_source);
+ TesterDecoder tester_decoder(std::move(decoder), &perf_analyzer,
+ decoder_settings, decoder_task_queue);
+ TesterEncoder tester_encoder(std::move(encoder), &tester_decoder,
+ &perf_analyzer, encoder_settings,
+ encoder_task_queue);
+
+ while (auto frame = sync_source.PullFrame()) {
+ tester_encoder.Encode(*frame);
+ }
+
+ tester_encoder.Flush();
+ tester_decoder.Flush();
+
+ return perf_analyzer.GetStats();
+}
+
+} // namespace test
+} // namespace webrtc
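A worked example of the `Pacer` arithmetic above, for constant-rate pacing at 20 Hz (the configuration exercised by the pacing unit tests further down):

    pacing time = 1 / 20 Hz = 50 ms
    frame captured instantly:   delay = 50 ms - 0 ms = 50 ms    (starts at 50 ms)
    frame captured 200 ms late: delay = max(0, 50 ms - 200 ms) = 0  (no extra delay)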
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.h
new file mode 100644
index 0000000000..b64adeb882
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TESTER_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TESTER_IMPL_H_
+
+#include <memory>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "api/test/video_codec_tester.h"
+
+namespace webrtc {
+namespace test {
+
+// A stateless implementation of `VideoCodecTester`. This class is thread-safe.
+class VideoCodecTesterImpl : public VideoCodecTester {
+ public:
+ VideoCodecTesterImpl();
+ explicit VideoCodecTesterImpl(TaskQueueFactory* task_queue_factory);
+
+ std::unique_ptr<VideoCodecTestStats> RunDecodeTest(
+ std::unique_ptr<CodedVideoSource> video_source,
+ std::unique_ptr<Decoder> decoder,
+ const DecoderSettings& decoder_settings) override;
+
+ std::unique_ptr<VideoCodecTestStats> RunEncodeTest(
+ std::unique_ptr<RawVideoSource> video_source,
+ std::unique_ptr<Encoder> encoder,
+ const EncoderSettings& encoder_settings) override;
+
+ std::unique_ptr<VideoCodecTestStats> RunEncodeDecodeTest(
+ std::unique_ptr<RawVideoSource> video_source,
+ std::unique_ptr<Encoder> encoder,
+ std::unique_ptr<Decoder> decoder,
+ const EncoderSettings& encoder_settings,
+ const DecoderSettings& decoder_settings) override;
+
+ protected:
+ std::unique_ptr<TaskQueueFactory> owned_task_queue_factory_;
+ TaskQueueFactory* task_queue_factory_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TESTER_IMPL_H_
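A minimal sketch of driving the tester declared above; `source`, `encoder`, and `decoder` are hypothetical objects implementing the interfaces from `api/test/video_codec_tester.h`:

    VideoCodecTesterImpl tester;  // nullptr factory: default task queue factory
    std::unique_ptr<VideoCodecTestStats> stats = tester.RunEncodeDecodeTest(
        std::move(source), std::move(encoder), std::move(decoder),
        encoder_settings, decoder_settings);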
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl_unittest.cc
new file mode 100644
index 0000000000..29fb006fb5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_tester_impl_unittest.cc
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/video_codec_tester_impl.h"
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "api/task_queue/test/mock_task_queue_base.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+
+using Decoder = VideoCodecTester::Decoder;
+using Encoder = VideoCodecTester::Encoder;
+using CodedVideoSource = VideoCodecTester::CodedVideoSource;
+using RawVideoSource = VideoCodecTester::RawVideoSource;
+using DecoderSettings = VideoCodecTester::DecoderSettings;
+using EncoderSettings = VideoCodecTester::EncoderSettings;
+using PacingSettings = VideoCodecTester::PacingSettings;
+using PacingMode = PacingSettings::PacingMode;
+
+constexpr Frequency k90kHz = Frequency::Hertz(90000);
+
+VideoFrame CreateVideoFrame(uint32_t timestamp_rtp) {
+ rtc::scoped_refptr<I420Buffer> buffer(I420Buffer::Create(2, 2));
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(timestamp_rtp)
+ .build();
+}
+
+EncodedImage CreateEncodedImage(uint32_t timestamp_rtp) {
+ EncodedImage encoded_image;
+ encoded_image.SetTimestamp(timestamp_rtp);
+ return encoded_image;
+}
+
+class MockRawVideoSource : public RawVideoSource {
+ public:
+ MOCK_METHOD(absl::optional<VideoFrame>, PullFrame, (), (override));
+ MOCK_METHOD(VideoFrame,
+ GetFrame,
+ (uint32_t timestamp_rtp, Resolution),
+ (override));
+};
+
+class MockCodedVideoSource : public CodedVideoSource {
+ public:
+ MOCK_METHOD(absl::optional<EncodedImage>, PullFrame, (), (override));
+};
+
+class MockDecoder : public Decoder {
+ public:
+ MOCK_METHOD(void,
+ Decode,
+ (const EncodedImage& frame, DecodeCallback callback),
+ (override));
+};
+
+class MockEncoder : public Encoder {
+ public:
+ MOCK_METHOD(void,
+ Encode,
+ (const VideoFrame& frame, EncodeCallback callback),
+ (override));
+};
+
+class MockTaskQueueFactory : public TaskQueueFactory {
+ public:
+ explicit MockTaskQueueFactory(TaskQueueBase& task_queue)
+ : task_queue_(task_queue) {}
+
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> CreateTaskQueue(
+ absl::string_view name,
+ Priority priority) const override {
+ return std::unique_ptr<TaskQueueBase, TaskQueueDeleter>(&task_queue_);
+ }
+
+ protected:
+ TaskQueueBase& task_queue_;
+};
+} // namespace
+
+class VideoCodecTesterImplPacingTest
+ : public ::testing::TestWithParam<std::tuple<PacingSettings,
+ std::vector<int>,
+ std::vector<int>,
+ std::vector<int>>> {
+ public:
+ VideoCodecTesterImplPacingTest()
+ : pacing_settings_(std::get<0>(GetParam())),
+ frame_timestamp_ms_(std::get<1>(GetParam())),
+ frame_capture_delay_ms_(std::get<2>(GetParam())),
+ expected_frame_start_ms_(std::get<3>(GetParam())),
+ num_frames_(frame_timestamp_ms_.size()),
+ task_queue_factory_(task_queue_) {}
+
+ void SetUp() override {
+ ON_CALL(task_queue_, PostTask)
+ .WillByDefault(Invoke(
+ [](absl::AnyInvocable<void() &&> task) { std::move(task)(); }));
+
+ ON_CALL(task_queue_, PostDelayedTask)
+ .WillByDefault(
+ Invoke([&](absl::AnyInvocable<void() &&> task, TimeDelta delay) {
+ clock_.AdvanceTime(delay);
+ std::move(task)();
+ }));
+ }
+
+ protected:
+ PacingSettings pacing_settings_;
+ std::vector<int> frame_timestamp_ms_;
+ std::vector<int> frame_capture_delay_ms_;
+ std::vector<int> expected_frame_start_ms_;
+ size_t num_frames_;
+
+ rtc::ScopedFakeClock clock_;
+ MockTaskQueueBase task_queue_;
+ MockTaskQueueFactory task_queue_factory_;
+};
+
+TEST_P(VideoCodecTesterImplPacingTest, PaceEncode) {
+ auto video_source = std::make_unique<MockRawVideoSource>();
+
+ size_t frame_num = 0;
+ EXPECT_CALL(*video_source, PullFrame).WillRepeatedly(Invoke([&]() mutable {
+ if (frame_num >= num_frames_) {
+ return absl::optional<VideoFrame>();
+ }
+ clock_.AdvanceTime(TimeDelta::Millis(frame_capture_delay_ms_[frame_num]));
+
+ uint32_t timestamp_rtp = frame_timestamp_ms_[frame_num] * k90kHz.hertz() /
+ rtc::kNumMillisecsPerSec;
+ ++frame_num;
+ return absl::optional<VideoFrame>(CreateVideoFrame(timestamp_rtp));
+ }));
+
+ auto encoder = std::make_unique<MockEncoder>();
+ EncoderSettings encoder_settings;
+ encoder_settings.pacing = pacing_settings_;
+
+ VideoCodecTesterImpl tester(&task_queue_factory_);
+ auto fs = tester
+ .RunEncodeTest(std::move(video_source), std::move(encoder),
+ encoder_settings)
+ ->GetFrameStatistics();
+ ASSERT_EQ(fs.size(), num_frames_);
+
+ for (size_t i = 0; i < fs.size(); ++i) {
+ int encode_start_ms = (fs[i].encode_start_ns - fs[0].encode_start_ns) /
+ rtc::kNumNanosecsPerMillisec;
+ EXPECT_NEAR(encode_start_ms, expected_frame_start_ms_[i], 10);
+ }
+}
+
+TEST_P(VideoCodecTesterImplPacingTest, PaceDecode) {
+ auto video_source = std::make_unique<MockCodedVideoSource>();
+
+ size_t frame_num = 0;
+ EXPECT_CALL(*video_source, PullFrame).WillRepeatedly(Invoke([&]() mutable {
+ if (frame_num >= num_frames_) {
+ return absl::optional<EncodedImage>();
+ }
+ clock_.AdvanceTime(TimeDelta::Millis(frame_capture_delay_ms_[frame_num]));
+
+ uint32_t timestamp_rtp = frame_timestamp_ms_[frame_num] * k90kHz.hertz() /
+ rtc::kNumMillisecsPerSec;
+ ++frame_num;
+ return absl::optional<EncodedImage>(CreateEncodedImage(timestamp_rtp));
+ }));
+
+ auto decoder = std::make_unique<MockDecoder>();
+ DecoderSettings decoder_settings;
+ decoder_settings.pacing = pacing_settings_;
+
+ VideoCodecTesterImpl tester(&task_queue_factory_);
+ auto fs = tester
+ .RunDecodeTest(std::move(video_source), std::move(decoder),
+ decoder_settings)
+ ->GetFrameStatistics();
+ ASSERT_EQ(fs.size(), num_frames_);
+
+ for (size_t i = 0; i < fs.size(); ++i) {
+ int decode_start_ms = (fs[i].decode_start_ns - fs[0].decode_start_ns) /
+ rtc::kNumNanosecsPerMillisec;
+ EXPECT_NEAR(decode_start_ms, expected_frame_start_ms_[i], 10);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ All,
+ VideoCodecTesterImplPacingTest,
+ ::testing::ValuesIn(
+ {std::make_tuple(PacingSettings({.mode = PacingMode::kNoPacing}),
+ /*frame_timestamp_ms=*/std::vector<int>{0, 100},
+ /*frame_capture_delay_ms=*/std::vector<int>{0, 0},
+ /*expected_frame_start_ms=*/std::vector<int>{0, 0}),
+ // Pace with rate equal to the source frame rate. Frames are captured
+ // instantly. Verify that frames are paced with the source frame rate.
+ std::make_tuple(PacingSettings({.mode = PacingMode::kRealTime}),
+ /*frame_timestamp_ms=*/std::vector<int>{0, 100},
+ /*frame_capture_delay_ms=*/std::vector<int>{0, 0},
+ /*expected_frame_start_ms=*/std::vector<int>{0, 100}),
+ // Pace with rate equal to the source frame rate. Frame capture is
+ // delayed by more than pacing time. Verify that no extra delay is
+ // added.
+ std::make_tuple(PacingSettings({.mode = PacingMode::kRealTime}),
+ /*frame_timestamp_ms=*/std::vector<int>{0, 100},
+ /*frame_capture_delay_ms=*/std::vector<int>{0, 200},
+ /*expected_frame_start_ms=*/std::vector<int>{0, 200}),
+          // Pace with a constant rate less than the source frame rate. Frames
+          // are captured instantly. Verify that frames are paced with the
+          // requested constant rate.
+ std::make_tuple(
+ PacingSettings({.mode = PacingMode::kConstantRate,
+ .constant_rate = Frequency::Hertz(20)}),
+ /*frame_timestamp_ms=*/std::vector<int>{0, 100},
+ /*frame_capture_delay_ms=*/std::vector<int>{0, 0},
+ /*expected_frame_start_ms=*/std::vector<int>{0, 50}),
+          // Pace with a constant rate less than the source frame rate. Frame
+          // capture is delayed by more than the pacing time. Verify that no
+          // extra delay is added.
+ std::make_tuple(
+ PacingSettings({.mode = PacingMode::kConstantRate,
+ .constant_rate = Frequency::Hertz(20)}),
+ /*frame_timestamp_ms=*/std::vector<int>{0, 100},
+ /*frame_capture_delay_ms=*/std::vector<int>{0, 200},
+ /*expected_frame_start_ms=*/std::vector<int>{0, 200})}));
+} // namespace test
+} // namespace webrtc
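The tests above derive RTP timestamps from capture times using the 90 kHz RTP video clock, i.e. 90 ticks per millisecond:

    timestamp_rtp = t_ms * 90000 / 1000 = t_ms * 90
    t_ms = 100  =>  timestamp_rtp = 9000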
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc
new file mode 100644
index 0000000000..a4a8b253fc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+
+#include <utility>
+
+#include "api/test/create_frame_generator.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "test/video_codec_settings.h"
+
+static constexpr webrtc::TimeDelta kEncodeTimeout =
+ webrtc::TimeDelta::Millis(100);
+static constexpr webrtc::TimeDelta kDecodeTimeout =
+ webrtc::TimeDelta::Millis(25);
+// Set bitrate to get higher quality.
+static const int kStartBitrate = 300;
+static const int kMaxBitrate = 4000;
+static const int kWidth = 176; // Width of the input image.
+static const int kHeight = 144; // Height of the input image.
+static const int kMaxFramerate = 30; // Arbitrary value.
+
+namespace webrtc {
+namespace {
+const VideoEncoder::Capabilities kCapabilities(false);
+}
+
+EncodedImageCallback::Result
+VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(
+ const EncodedImage& frame,
+ const CodecSpecificInfo* codec_specific_info) {
+ MutexLock lock(&test_->encoded_frame_section_);
+ test_->encoded_frames_.push_back(frame);
+ RTC_DCHECK(codec_specific_info);
+ test_->codec_specific_infos_.push_back(*codec_specific_info);
+ if (!test_->wait_for_encoded_frames_threshold_) {
+ test_->encoded_frame_event_.Set();
+ return Result(Result::OK);
+ }
+
+ if (test_->encoded_frames_.size() ==
+ test_->wait_for_encoded_frames_threshold_) {
+ test_->wait_for_encoded_frames_threshold_ = 1;
+ test_->encoded_frame_event_.Set();
+ }
+ return Result(Result::OK);
+}
+
+void VideoCodecUnitTest::FakeDecodeCompleteCallback::Decoded(
+ VideoFrame& frame,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ MutexLock lock(&test_->decoded_frame_section_);
+ test_->decoded_frame_.emplace(frame);
+ test_->decoded_qp_ = qp;
+ test_->decoded_frame_event_.Set();
+}
+
+void VideoCodecUnitTest::SetUp() {
+ webrtc::test::CodecSettings(kVideoCodecVP8, &codec_settings_);
+ codec_settings_.startBitrate = kStartBitrate;
+ codec_settings_.maxBitrate = kMaxBitrate;
+ codec_settings_.maxFramerate = kMaxFramerate;
+ codec_settings_.width = kWidth;
+ codec_settings_.height = kHeight;
+
+ ModifyCodecSettings(&codec_settings_);
+
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ codec_settings_.width, codec_settings_.height,
+ test::FrameGeneratorInterface::OutputType::kI420, absl::optional<int>());
+
+ encoder_ = CreateEncoder();
+ decoder_ = CreateDecoder();
+ encoder_->RegisterEncodeCompleteCallback(&encode_complete_callback_);
+ decoder_->RegisterDecodeCompleteCallback(&decode_complete_callback_);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(
+ &codec_settings_,
+ VideoEncoder::Settings(kCapabilities, 1 /* number of cores */,
+ 0 /* max payload size (unused) */)));
+
+ VideoDecoder::Settings decoder_settings;
+ decoder_settings.set_codec_type(codec_settings_.codecType);
+ decoder_settings.set_max_render_resolution(
+ {codec_settings_.width, codec_settings_.height});
+ EXPECT_TRUE(decoder_->Configure(decoder_settings));
+}
+
+void VideoCodecUnitTest::ModifyCodecSettings(VideoCodec* codec_settings) {}
+
+VideoFrame VideoCodecUnitTest::NextInputFrame() {
+ test::FrameGeneratorInterface::VideoFrameData frame_data =
+ input_frame_generator_->NextFrame();
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(frame_data.buffer)
+ .set_update_rect(frame_data.update_rect)
+ .build();
+
+ const uint32_t timestamp =
+ last_input_frame_timestamp_ +
+ kVideoPayloadTypeFrequency / codec_settings_.maxFramerate;
+ input_frame.set_timestamp(timestamp);
+
+ last_input_frame_timestamp_ = timestamp;
+ return input_frame;
+}
+
+bool VideoCodecUnitTest::WaitForEncodedFrame(
+ EncodedImage* frame,
+ CodecSpecificInfo* codec_specific_info) {
+ std::vector<EncodedImage> frames;
+ std::vector<CodecSpecificInfo> codec_specific_infos;
+ if (!WaitForEncodedFrames(&frames, &codec_specific_infos))
+ return false;
+ EXPECT_EQ(frames.size(), static_cast<size_t>(1));
+ EXPECT_EQ(frames.size(), codec_specific_infos.size());
+ *frame = frames[0];
+ *codec_specific_info = codec_specific_infos[0];
+ return true;
+}
+
+void VideoCodecUnitTest::SetWaitForEncodedFramesThreshold(size_t num_frames) {
+ MutexLock lock(&encoded_frame_section_);
+ wait_for_encoded_frames_threshold_ = num_frames;
+}
+
+bool VideoCodecUnitTest::WaitForEncodedFrames(
+ std::vector<EncodedImage>* frames,
+ std::vector<CodecSpecificInfo>* codec_specific_info) {
+ EXPECT_TRUE(encoded_frame_event_.Wait(kEncodeTimeout))
+ << "Timed out while waiting for encoded frame.";
+ // This becomes unsafe if there are multiple threads waiting for frames.
+ MutexLock lock(&encoded_frame_section_);
+ EXPECT_FALSE(encoded_frames_.empty());
+ EXPECT_FALSE(codec_specific_infos_.empty());
+ EXPECT_EQ(encoded_frames_.size(), codec_specific_infos_.size());
+ if (!encoded_frames_.empty()) {
+ *frames = encoded_frames_;
+ encoded_frames_.clear();
+ RTC_DCHECK(!codec_specific_infos_.empty());
+ *codec_specific_info = codec_specific_infos_;
+ codec_specific_infos_.clear();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool VideoCodecUnitTest::WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
+ absl::optional<uint8_t>* qp) {
+ bool ret = decoded_frame_event_.Wait(kDecodeTimeout);
+ EXPECT_TRUE(ret) << "Timed out while waiting for a decoded frame.";
+ // This becomes unsafe if there are multiple threads waiting for frames.
+ MutexLock lock(&decoded_frame_section_);
+ EXPECT_TRUE(decoded_frame_);
+ if (decoded_frame_) {
+ frame->reset(new VideoFrame(std::move(*decoded_frame_)));
+ *qp = decoded_qp_;
+ decoded_frame_.reset();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+size_t VideoCodecUnitTest::GetNumEncodedFrames() {
+ MutexLock lock(&encoded_frame_section_);
+ return encoded_frames_.size();
+}
+
+} // namespace webrtc
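The per-frame timestamp step in `NextInputFrame` above follows from the 90 kHz RTP clock (`kVideoPayloadTypeFrequency`) and the configured frame rate; at the default 30 fps:

    step = kVideoPayloadTypeFrequency / maxFramerate = 90000 / 30 = 3000 ticks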
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h
new file mode 100644
index 0000000000..7d05882b63
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_unittest.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/test/frame_generator_interface.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class VideoCodecUnitTest : public ::testing::Test {
+ public:
+ VideoCodecUnitTest()
+ : encode_complete_callback_(this),
+ decode_complete_callback_(this),
+ wait_for_encoded_frames_threshold_(1),
+ last_input_frame_timestamp_(0) {}
+
+ protected:
+ class FakeEncodeCompleteCallback : public webrtc::EncodedImageCallback {
+ public:
+ explicit FakeEncodeCompleteCallback(VideoCodecUnitTest* test)
+ : test_(test) {}
+
+ Result OnEncodedImage(const EncodedImage& frame,
+ const CodecSpecificInfo* codec_specific_info);
+
+ private:
+ VideoCodecUnitTest* const test_;
+ };
+
+ class FakeDecodeCompleteCallback : public webrtc::DecodedImageCallback {
+ public:
+ explicit FakeDecodeCompleteCallback(VideoCodecUnitTest* test)
+ : test_(test) {}
+
+ int32_t Decoded(VideoFrame& frame) override {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ void Decoded(VideoFrame& frame,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override;
+
+ private:
+ VideoCodecUnitTest* const test_;
+ };
+
+ virtual std::unique_ptr<VideoEncoder> CreateEncoder() = 0;
+ virtual std::unique_ptr<VideoDecoder> CreateDecoder() = 0;
+
+ void SetUp() override;
+
+ virtual void ModifyCodecSettings(VideoCodec* codec_settings);
+
+ VideoFrame NextInputFrame();
+
+  // Helper method for waiting for a single encoded frame.
+ bool WaitForEncodedFrame(EncodedImage* frame,
+ CodecSpecificInfo* codec_specific_info);
+
+  // Helper methods for waiting for multiple encoded frames. The caller must
+  // set how many frames to wait for via `num_frames` before calling Encode().
+  // The frames can then be retrieved via WaitForEncodedFrames().
+ void SetWaitForEncodedFramesThreshold(size_t num_frames);
+ bool WaitForEncodedFrames(
+ std::vector<EncodedImage>* frames,
+ std::vector<CodecSpecificInfo>* codec_specific_info);
+
+  // Helper method for waiting for a single decoded frame.
+ bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
+ absl::optional<uint8_t>* qp);
+
+ size_t GetNumEncodedFrames();
+
+ VideoCodec codec_settings_;
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ std::unique_ptr<test::FrameGeneratorInterface> input_frame_generator_;
+
+ private:
+ FakeEncodeCompleteCallback encode_complete_callback_;
+ FakeDecodeCompleteCallback decode_complete_callback_;
+
+ rtc::Event encoded_frame_event_;
+ Mutex encoded_frame_section_;
+ size_t wait_for_encoded_frames_threshold_;
+ std::vector<EncodedImage> encoded_frames_
+ RTC_GUARDED_BY(encoded_frame_section_);
+ std::vector<CodecSpecificInfo> codec_specific_infos_
+ RTC_GUARDED_BY(encoded_frame_section_);
+
+ rtc::Event decoded_frame_event_;
+ Mutex decoded_frame_section_;
+ absl::optional<VideoFrame> decoded_frame_
+ RTC_GUARDED_BY(decoded_frame_section_);
+ absl::optional<uint8_t> decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_);
+
+ uint32_t last_input_frame_timestamp_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
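A minimal sketch of a concrete fixture built on the base class above, assuming the libvpx VP8 factory functions `VP8Encoder::Create()` and `VP8Decoder::Create()` from `modules/video_coding/codecs/vp8/include/vp8.h`:

    class HypotheticalVp8Test : public VideoCodecUnitTest {
     protected:
      std::unique_ptr<VideoEncoder> CreateEncoder() override {
        return VP8Encoder::Create();  // assumed libvpx VP8 encoder factory
      }
      std::unique_ptr<VideoDecoder> CreateDecoder() override {
        return VP8Decoder::Create();  // assumed libvpx VP8 decoder factory
      }
    };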
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
new file mode 100644
index 0000000000..41f2304748
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#if defined(WEBRTC_ANDROID)
+#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
+#elif defined(WEBRTC_IOS)
+#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
+#endif
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+using ::testing::NotNull;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+
+int32_t InitEncoder(VideoCodecType codec_type, VideoEncoder* encoder) {
+ VideoCodec codec;
+ CodecSettings(codec_type, &codec);
+ codec.width = 640;
+ codec.height = 480;
+ codec.maxFramerate = 30;
+ RTC_CHECK(encoder);
+ return encoder->InitEncode(
+ &codec, VideoEncoder::Settings(kCapabilities, 1 /* number_of_cores */,
+ 1200 /* max_payload_size */));
+}
+
+VideoDecoder::Settings DecoderSettings(VideoCodecType codec_type) {
+ VideoDecoder::Settings settings;
+ settings.set_max_render_resolution({640, 480});
+ settings.set_codec_type(codec_type);
+ return settings;
+}
+
+} // namespace
+
+class VideoEncoderDecoderInstantiationTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<::testing::tuple<int, int>> {
+ protected:
+ VideoEncoderDecoderInstantiationTest()
+ : vp8_format_("VP8"),
+ vp9_format_("VP9"),
+ h264cbp_format_("H264"),
+ num_encoders_(::testing::get<0>(GetParam())),
+ num_decoders_(::testing::get<1>(GetParam())) {
+#if defined(WEBRTC_ANDROID)
+ InitializeAndroidObjects();
+ encoder_factory_ = CreateAndroidEncoderFactory();
+ decoder_factory_ = CreateAndroidDecoderFactory();
+#elif defined(WEBRTC_IOS)
+ encoder_factory_ = CreateObjCEncoderFactory();
+ decoder_factory_ = CreateObjCDecoderFactory();
+#else
+ RTC_DCHECK_NOTREACHED() << "Only support Android and iOS.";
+#endif
+ }
+
+ ~VideoEncoderDecoderInstantiationTest() {
+ for (auto& encoder : encoders_) {
+ encoder->Release();
+ }
+ for (auto& decoder : decoders_) {
+ decoder->Release();
+ }
+ }
+
+ const SdpVideoFormat vp8_format_;
+ const SdpVideoFormat vp9_format_;
+ const SdpVideoFormat h264cbp_format_;
+ std::unique_ptr<VideoEncoderFactory> encoder_factory_;
+ std::unique_ptr<VideoDecoderFactory> decoder_factory_;
+
+ const int num_encoders_;
+ const int num_decoders_;
+ std::vector<std::unique_ptr<VideoEncoder>> encoders_;
+ std::vector<std::unique_ptr<VideoDecoder>> decoders_;
+};
+
+INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
+ VideoEncoderDecoderInstantiationTest,
+ ::testing::Combine(::testing::Range(1, 4),
+ ::testing::Range(1, 2)));
+
+INSTANTIATE_TEST_SUITE_P(MultipleDecoders,
+ VideoEncoderDecoderInstantiationTest,
+ ::testing::Combine(::testing::Range(1, 2),
+ ::testing::Range(1, 9)));
+
+INSTANTIATE_TEST_SUITE_P(MultipleEncodersDecoders,
+ VideoEncoderDecoderInstantiationTest,
+ ::testing::Combine(::testing::Range(1, 4),
+ ::testing::Range(1, 9)));
+
+// TODO(brandtr): Check that the factories actually support the codecs before
+// trying to instantiate. Currently, we will just crash with a Java exception
+// if the factory does not support the codec.
+TEST_P(VideoEncoderDecoderInstantiationTest, DISABLED_InstantiateVp8Codecs) {
+ for (int i = 0; i < num_encoders_; ++i) {
+ std::unique_ptr<VideoEncoder> encoder =
+ encoder_factory_->CreateVideoEncoder(vp8_format_);
+ EXPECT_EQ(0, InitEncoder(kVideoCodecVP8, encoder.get()));
+ encoders_.emplace_back(std::move(encoder));
+ }
+
+ for (int i = 0; i < num_decoders_; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(vp8_format_);
+ ASSERT_THAT(decoder, NotNull());
+ EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecVP8)));
+ decoders_.emplace_back(std::move(decoder));
+ }
+}
+
+TEST_P(VideoEncoderDecoderInstantiationTest,
+ DISABLED_InstantiateH264CBPCodecs) {
+ for (int i = 0; i < num_encoders_; ++i) {
+ std::unique_ptr<VideoEncoder> encoder =
+ encoder_factory_->CreateVideoEncoder(h264cbp_format_);
+ EXPECT_EQ(0, InitEncoder(kVideoCodecH264, encoder.get()));
+ encoders_.emplace_back(std::move(encoder));
+ }
+
+ for (int i = 0; i < num_decoders_; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(h264cbp_format_);
+ ASSERT_THAT(decoder, NotNull());
+ EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecH264)));
+ decoders_.push_back(std::move(decoder));
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc
new file mode 100644
index 0000000000..9189f5abe5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_av1.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+// Test clip settings.
+constexpr int kCifWidth = 352;
+constexpr int kCifHeight = 288;
+constexpr int kNumFramesLong = 300;
+
+VideoCodecTestFixture::Config CreateConfig(std::string filename) {
+ VideoCodecTestFixture::Config config;
+ config.filename = filename;
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFramesLong;
+ config.use_single_core = true;
+ return config;
+}
+
+TEST(VideoCodecTestAv1, HighBitrate) {
+ auto config = CreateConfig("foreman_cif");
+ config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
+ kCifWidth, kCifHeight);
+ config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ config.num_frames = kNumFramesLong;
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {12, 1, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 34, 0.94, 0.91}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
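The positional initializers above are easiest to read against the threshold
structs from api/test/videocodec_test_fixture.h; the field order below is an
assumption inferred from the checks in VerifyVideoStatistic further down in
this patch:

  // rc_thresholds row {12, 1, 0, 1, 0.3, 0.1, 0, 1}, decoded:
  //   max_avg_bitrate_mismatch_percent      = 12
  //   max_time_to_reach_target_bitrate_sec  = 1
  //   max_avg_framerate_mismatch_percent    = 0
  //   max_avg_buffer_level_sec              = 1
  //   max_max_key_frame_delay_sec           = 0.3
  //   max_max_delta_frame_delay_sec         = 0.1
  //   max_num_spatial_resizes               = 0
  //   max_num_key_frames                    = 1
  // quality_thresholds rows follow
  //   {min_avg_psnr, min_min_psnr, min_avg_ssim, min_min_ssim}.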
+
+TEST(VideoCodecTestAv1, VeryLowBitrate) {
+ auto config = CreateConfig("foreman_cif");
+ config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
+ kCifWidth, kCifHeight);
+ config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{50, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {15, 8, 75, 2, 2, 2, 2, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{28, 24.8, 0.70, 0.55}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if !defined(WEBRTC_ANDROID)
+constexpr int kHdWidth = 1280;
+constexpr int kHdHeight = 720;
+TEST(VideoCodecTestAv1, Hd) {
+ auto config = CreateConfig("ConferenceMotion_1280_720_50");
+ config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
+ kHdWidth, kHdHeight);
+ config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ config.num_frames = kNumFramesLong;
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{1000, 50, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {13, 3, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {
+ {35.9, 31.5, 0.925, 0.865}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+#endif
+
+} // namespace
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc
new file mode 100644
index 0000000000..126aa93ee8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_config_unittest.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video_codecs/video_codec.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/video_codec_settings.h"
+
+using ::testing::ElementsAre;
+
+namespace webrtc {
+namespace test {
+
+using Config = VideoCodecTestFixture::Config;
+
+namespace {
+const size_t kNumTemporalLayers = 2;
+} // namespace
+
+TEST(Config, NumberOfCoresWithUseSingleCore) {
+ Config config;
+ config.use_single_core = true;
+ EXPECT_EQ(1u, config.NumberOfCores());
+}
+
+TEST(Config, NumberOfCoresWithoutUseSingleCore) {
+ Config config;
+ config.use_single_core = false;
+ EXPECT_GE(config.NumberOfCores(), 1u);
+}
+
+TEST(Config, NumberOfTemporalLayersIsOne) {
+ Config config;
+ webrtc::test::CodecSettings(kVideoCodecH264, &config.codec_settings);
+ EXPECT_EQ(1u, config.NumberOfTemporalLayers());
+}
+
+TEST(Config, NumberOfTemporalLayers_Vp8) {
+ Config config;
+ webrtc::test::CodecSettings(kVideoCodecVP8, &config.codec_settings);
+ config.codec_settings.VP8()->numberOfTemporalLayers = kNumTemporalLayers;
+ EXPECT_EQ(kNumTemporalLayers, config.NumberOfTemporalLayers());
+}
+
+TEST(Config, NumberOfTemporalLayers_Vp9) {
+ Config config;
+ webrtc::test::CodecSettings(kVideoCodecVP9, &config.codec_settings);
+ config.codec_settings.VP9()->numberOfTemporalLayers = kNumTemporalLayers;
+ EXPECT_EQ(kNumTemporalLayers, config.NumberOfTemporalLayers());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
new file mode 100644
index 0000000000..e56e8a92af
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/str_replace.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/metrics/metric.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory_template.h"
+#include "api/video_codecs/video_decoder_factory_template_dav1d_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_libvpx_vp9_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_open_h264_adapter.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "api/video_codecs/video_encoder_factory_template.h"
+#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h"
+#include "common_video/h264/h264_common.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/cpu_time.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/cpu_info.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+#include "test/testsupport/frame_writer.h"
+#include "test/video_codec_settings.h"
+#include "video/config/simulcast.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using VideoStatistics = VideoCodecTestStats::VideoStatistics;
+
+const int kBaseKeyFrameInterval = 3000;
+const double kBitratePriority = 1.0;
+const int kDefaultMaxFramerateFps = 30;
+const int kMaxQp = 56;
+
+void ConfigureSimulcast(VideoCodec* codec_settings) {
+ FieldTrialBasedConfig trials;
+ const std::vector<webrtc::VideoStream> streams = cricket::GetSimulcastConfig(
+ /*min_layer=*/1, codec_settings->numberOfSimulcastStreams,
+ codec_settings->width, codec_settings->height, kBitratePriority, kMaxQp,
+ /*is_screenshare=*/false, /*temporal_layers_supported=*/true, trials);
+
+ for (size_t i = 0; i < streams.size(); ++i) {
+ SimulcastStream* ss = &codec_settings->simulcastStream[i];
+ ss->width = static_cast<uint16_t>(streams[i].width);
+ ss->height = static_cast<uint16_t>(streams[i].height);
+ ss->numberOfTemporalLayers =
+ static_cast<unsigned char>(*streams[i].num_temporal_layers);
+ ss->maxBitrate = streams[i].max_bitrate_bps / 1000;
+ ss->targetBitrate = streams[i].target_bitrate_bps / 1000;
+ ss->minBitrate = streams[i].min_bitrate_bps / 1000;
+ ss->qpMax = streams[i].max_qp;
+ ss->active = true;
+ }
+}
+
+void ConfigureSvc(VideoCodec* codec_settings) {
+ RTC_CHECK_EQ(kVideoCodecVP9, codec_settings->codecType);
+
+ const std::vector<SpatialLayer> layers = GetSvcConfig(
+ codec_settings->width, codec_settings->height, kDefaultMaxFramerateFps,
+ /*first_active_layer=*/0, codec_settings->VP9()->numberOfSpatialLayers,
+ codec_settings->VP9()->numberOfTemporalLayers,
+ /*is_screen_sharing=*/false);
+ ASSERT_EQ(codec_settings->VP9()->numberOfSpatialLayers, layers.size())
+ << "GetSvcConfig returned an unexpected number of spatial layers.";
+
+ for (size_t i = 0; i < layers.size(); ++i) {
+ codec_settings->spatialLayers[i] = layers[i];
+ }
+}
+
+std::string CodecSpecificToString(const VideoCodec& codec) {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ switch (codec.codecType) {
+ case kVideoCodecVP8:
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(codec.VP8().numberOfTemporalLayers);
+ ss << "\ndenoising: " << codec.VP8().denoisingOn;
+ ss << "\nautomatic_resize: " << codec.VP8().automaticResizeOn;
+ ss << "\nkey_frame_interval: " << codec.VP8().keyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(codec.VP9().numberOfTemporalLayers);
+ ss << "\nnum_spatial_layers: "
+ << static_cast<int>(codec.VP9().numberOfSpatialLayers);
+ ss << "\ndenoising: " << codec.VP9().denoisingOn;
+ ss << "\nkey_frame_interval: " << codec.VP9().keyFrameInterval;
+ ss << "\nadaptive_qp_mode: " << codec.VP9().adaptiveQpMode;
+ ss << "\nautomatic_resize: " << codec.VP9().automaticResizeOn;
+ ss << "\nflexible_mode: " << codec.VP9().flexibleMode;
+ break;
+ case kVideoCodecH264:
+ ss << "\nkey_frame_interval: " << codec.H264().keyFrameInterval;
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(codec.H264().numberOfTemporalLayers);
+ break;
+ default:
+ break;
+ }
+ return ss.str();
+}
+
+bool RunEncodeInRealTime(const VideoCodecTestFixtureImpl::Config& config) {
+ return config.measure_cpu || config.encode_in_real_time;
+}
+
+std::string FilenameWithParams(
+ const VideoCodecTestFixtureImpl::Config& config) {
+ return config.filename + "_" + config.CodecName() + "_" +
+ std::to_string(config.codec_settings.startBitrate);
+}
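For example, with the settings used by the libvpx tests later in this patch
(filename "foreman_cif", VP8, 500 kbps starting rate profile), this evaluates
to:

  // FilenameWithParams(config) == "foreman_cif_VP8_500"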
+
+SdpVideoFormat CreateSdpVideoFormat(
+ const VideoCodecTestFixtureImpl::Config& config) {
+ if (config.codec_settings.codecType == kVideoCodecH264) {
+ const char* packetization_mode =
+ config.h264_codec_settings.packetization_mode ==
+ H264PacketizationMode::NonInterleaved
+ ? "1"
+ : "0";
+ SdpVideoFormat::Parameters codec_params = {
+ {cricket::kH264FmtpProfileLevelId,
+ *H264ProfileLevelIdToString(H264ProfileLevelId(
+ config.h264_codec_settings.profile, H264Level::kLevel3_1))},
+ {cricket::kH264FmtpPacketizationMode, packetization_mode},
+ {cricket::kH264FmtpLevelAsymmetryAllowed, "1"}};
+
+ return SdpVideoFormat(config.codec_name, codec_params);
+ } else if (config.codec_settings.codecType == kVideoCodecVP9) {
+ return SdpVideoFormat(config.codec_name, {{"profile-id", "0"}});
+ }
+
+ return SdpVideoFormat(config.codec_name);
+}
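As an illustration, for H.264 with NonInterleaved packetization and the
constrained baseline profile this produces parameters roughly equivalent to
the following (the exact "42e01f" encoding of constrained baseline level 3.1
is stated here as an assumption, not taken from this patch):

  // {{"profile-level-id", "42e01f"},
  //  {"packetization-mode", "1"},
  //  {"level-asymmetry-allowed", "1"}}
  // VP9 instead gets {{"profile-id", "0"}}; all other codecs fall through to
  // a bare SdpVideoFormat(config.codec_name).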
+
+} // namespace
+
+VideoCodecTestFixtureImpl::Config::Config() = default;
+
+void VideoCodecTestFixtureImpl::Config::SetCodecSettings(
+ std::string codec_name,
+ size_t num_simulcast_streams,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool denoising_on,
+ bool frame_dropper_on,
+ bool spatial_resize_on,
+ size_t width,
+ size_t height) {
+ this->codec_name = codec_name;
+ VideoCodecType codec_type = PayloadStringToCodecType(codec_name);
+ webrtc::test::CodecSettings(codec_type, &codec_settings);
+
+ // TODO(brandtr): Move the setting of `width` and `height` to the tests, and
+ // DCHECK that they are set before initializing the codec instead.
+ codec_settings.width = static_cast<uint16_t>(width);
+ codec_settings.height = static_cast<uint16_t>(height);
+
+ RTC_CHECK(num_simulcast_streams >= 1 &&
+ num_simulcast_streams <= kMaxSimulcastStreams);
+ RTC_CHECK(num_spatial_layers >= 1 && num_spatial_layers <= kMaxSpatialLayers);
+ RTC_CHECK(num_temporal_layers >= 1 &&
+ num_temporal_layers <= kMaxTemporalStreams);
+
+ // Simulcast is only available with VP8.
+ RTC_CHECK(num_simulcast_streams < 2 || codec_type == kVideoCodecVP8);
+
+ // Spatial scalability is only available with VP9.
+ RTC_CHECK(num_spatial_layers < 2 || codec_type == kVideoCodecVP9);
+
+ // Some parts of the code base require numberOfSimulcastStreams to be set
+ // to zero when simulcast is not used.
+ codec_settings.numberOfSimulcastStreams =
+ num_simulcast_streams <= 1 ? 0
+ : static_cast<uint8_t>(num_simulcast_streams);
+
+ codec_settings.SetFrameDropEnabled(frame_dropper_on);
+ switch (codec_settings.codecType) {
+ case kVideoCodecVP8:
+ codec_settings.VP8()->numberOfTemporalLayers =
+ static_cast<uint8_t>(num_temporal_layers);
+ codec_settings.VP8()->denoisingOn = denoising_on;
+ codec_settings.VP8()->automaticResizeOn = spatial_resize_on;
+ codec_settings.VP8()->keyFrameInterval = kBaseKeyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ codec_settings.VP9()->numberOfTemporalLayers =
+ static_cast<uint8_t>(num_temporal_layers);
+ codec_settings.VP9()->denoisingOn = denoising_on;
+ codec_settings.VP9()->keyFrameInterval = kBaseKeyFrameInterval;
+ codec_settings.VP9()->automaticResizeOn = spatial_resize_on;
+ codec_settings.VP9()->numberOfSpatialLayers =
+ static_cast<uint8_t>(num_spatial_layers);
+ break;
+ case kVideoCodecAV1:
+ codec_settings.qpMax = 63;
+ break;
+ case kVideoCodecH264:
+ codec_settings.H264()->keyFrameInterval = kBaseKeyFrameInterval;
+ codec_settings.H264()->numberOfTemporalLayers =
+ static_cast<uint8_t>(num_temporal_layers);
+ break;
+ default:
+ break;
+ }
+
+ if (codec_settings.numberOfSimulcastStreams > 1) {
+ ConfigureSimulcast(&codec_settings);
+ } else if (codec_settings.codecType == kVideoCodecVP9 &&
+ codec_settings.VP9()->numberOfSpatialLayers > 1) {
+ ConfigureSvc(&codec_settings);
+ }
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfCores() const {
+ return use_single_core ? 1 : CpuInfo::DetectNumberOfCores();
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfTemporalLayers() const {
+ if (codec_settings.codecType == kVideoCodecVP8) {
+ return codec_settings.VP8().numberOfTemporalLayers;
+ } else if (codec_settings.codecType == kVideoCodecVP9) {
+ return codec_settings.VP9().numberOfTemporalLayers;
+ } else if (codec_settings.codecType == kVideoCodecH264) {
+ return codec_settings.H264().numberOfTemporalLayers;
+ } else {
+ return 1;
+ }
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfSpatialLayers() const {
+ if (codec_settings.codecType == kVideoCodecVP9) {
+ return codec_settings.VP9().numberOfSpatialLayers;
+ } else {
+ return 1;
+ }
+}
+
+size_t VideoCodecTestFixtureImpl::Config::NumberOfSimulcastStreams() const {
+ return codec_settings.numberOfSimulcastStreams;
+}
+
+std::string VideoCodecTestFixtureImpl::Config::ToString() const {
+ std::string codec_type = CodecTypeToPayloadString(codec_settings.codecType);
+ rtc::StringBuilder ss;
+ ss << "test_name: " << test_name;
+ ss << "\nfilename: " << filename;
+ ss << "\nnum_frames: " << num_frames;
+ ss << "\nmax_payload_size_bytes: " << max_payload_size_bytes;
+ ss << "\ndecode: " << decode;
+ ss << "\nuse_single_core: " << use_single_core;
+ ss << "\nmeasure_cpu: " << measure_cpu;
+ ss << "\nnum_cores: " << NumberOfCores();
+ ss << "\ncodec_type: " << codec_type;
+ ss << "\n\n--> codec_settings";
+ ss << "\nwidth: " << codec_settings.width;
+ ss << "\nheight: " << codec_settings.height;
+ ss << "\nmax_framerate_fps: " << codec_settings.maxFramerate;
+ ss << "\nstart_bitrate_kbps: " << codec_settings.startBitrate;
+ ss << "\nmax_bitrate_kbps: " << codec_settings.maxBitrate;
+ ss << "\nmin_bitrate_kbps: " << codec_settings.minBitrate;
+ ss << "\nmax_qp: " << codec_settings.qpMax;
+ ss << "\nnum_simulcast_streams: "
+ << static_cast<int>(codec_settings.numberOfSimulcastStreams);
+ ss << "\n\n--> codec_settings." << codec_type;
+ ss << "complexity: "
+ << static_cast<int>(codec_settings.GetVideoEncoderComplexity());
+ ss << "\nframe_dropping: " << codec_settings.GetFrameDropEnabled();
+ ss << "\n" << CodecSpecificToString(codec_settings);
+ if (codec_settings.numberOfSimulcastStreams > 1) {
+ for (int i = 0; i < codec_settings.numberOfSimulcastStreams; ++i) {
+ ss << "\n\n--> codec_settings.simulcastStream[" << i << "]";
+ const SimulcastStream& simulcast_stream =
+ codec_settings.simulcastStream[i];
+ ss << "\nwidth: " << simulcast_stream.width;
+ ss << "\nheight: " << simulcast_stream.height;
+ ss << "\nnum_temporal_layers: "
+ << static_cast<int>(simulcast_stream.numberOfTemporalLayers);
+ ss << "\nmin_bitrate_kbps: " << simulcast_stream.minBitrate;
+ ss << "\ntarget_bitrate_kbps: " << simulcast_stream.targetBitrate;
+ ss << "\nmax_bitrate_kbps: " << simulcast_stream.maxBitrate;
+ ss << "\nmax_qp: " << simulcast_stream.qpMax;
+ ss << "\nactive: " << simulcast_stream.active;
+ }
+ }
+ ss << "\n";
+ return ss.Release();
+}
+
+std::string VideoCodecTestFixtureImpl::Config::CodecName() const {
+ std::string name = codec_name;
+ if (name.empty()) {
+ name = CodecTypeToPayloadString(codec_settings.codecType);
+ }
+ if (codec_settings.codecType == kVideoCodecH264) {
+ if (h264_codec_settings.profile == H264Profile::kProfileConstrainedHigh) {
+ return name + "-CHP";
+ } else {
+ RTC_DCHECK_EQ(h264_codec_settings.profile,
+ H264Profile::kProfileConstrainedBaseline);
+ return name + "-CBP";
+ }
+ }
+ return name;
+}
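Concretely, the suffix logic resolves as follows:

  // codec_name empty, codecType kVideoCodecVP8      -> "VP8"
  // kVideoCodecH264 + kProfileConstrainedHigh       -> "H264-CHP"
  // kVideoCodecH264 + kProfileConstrainedBaseline   -> "H264-CBP"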
+
+// TODO(kthelgason): Move this out of the test fixture impl and make it
+// available as a shared utility class.
+void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
+ webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const {
+ EXPECT_EQ(kVideoCodecH264, codec);
+ bool contains_sps = false;
+ bool contains_pps = false;
+ bool contains_idr = false;
+ const std::vector<webrtc::H264::NaluIndex> nalu_indices =
+ webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
+ for (const webrtc::H264::NaluIndex& index : nalu_indices) {
+ webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
+ encoded_frame.data()[index.payload_start_offset]);
+ if (nalu_type == webrtc::H264::NaluType::kSps) {
+ contains_sps = true;
+ } else if (nalu_type == webrtc::H264::NaluType::kPps) {
+ contains_pps = true;
+ } else if (nalu_type == webrtc::H264::NaluType::kIdr) {
+ contains_idr = true;
+ }
+ }
+ if (encoded_frame._frameType == VideoFrameType::kVideoFrameKey) {
+ EXPECT_TRUE(contains_sps) << "Keyframe should contain SPS.";
+ EXPECT_TRUE(contains_pps) << "Keyframe should contain PPS.";
+ EXPECT_TRUE(contains_idr) << "Keyframe should contain IDR.";
+ } else if (encoded_frame._frameType == VideoFrameType::kVideoFrameDelta) {
+ EXPECT_FALSE(contains_sps) << "Delta frame should not contain SPS.";
+ EXPECT_FALSE(contains_pps) << "Delta frame should not contain PPS.";
+ EXPECT_FALSE(contains_idr) << "Delta frame should not contain IDR.";
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
+class VideoCodecTestFixtureImpl::CpuProcessTime final {
+ public:
+ explicit CpuProcessTime(const Config& config) : config_(config) {}
+ ~CpuProcessTime() {}
+
+ void Start() {
+ if (config_.measure_cpu) {
+ cpu_time_ -= rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ -= rtc::SystemTimeNanos();
+ }
+ }
+ void Stop() {
+ if (config_.measure_cpu) {
+ cpu_time_ += rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ += rtc::SystemTimeNanos();
+ }
+ }
+ void Print() const {
+ if (config_.measure_cpu) {
+ RTC_LOG(LS_INFO) << "cpu_usage_percent: "
+ << GetUsagePercent() / config_.NumberOfCores();
+ }
+ }
+
+ private:
+ double GetUsagePercent() const {
+ return static_cast<double>(cpu_time_) / wallclock_time_ * 100.0;
+ }
+
+ const Config config_;
+ int64_t cpu_time_ = 0;
+ int64_t wallclock_time_ = 0;
+};
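The subtract-on-Start/add-on-Stop trick leaves cpu_time_ and wallclock_time_
holding the deltas over the measured region. A worked example with
hypothetical numbers, assuming NumberOfCores() returns 4:

  // cpu_time_       = 6e9 ns of process CPU time
  // wallclock_time_ = 10e9 ns of wall clock
  // GetUsagePercent() = 6e9 / 10e9 * 100 = 60.0
  // logged value      = 60.0 / 4 = 15.0  (cpu_usage_percent per core)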
+
+VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(Config config)
+ : encoder_factory_(std::make_unique<webrtc::VideoEncoderFactoryTemplate<
+ webrtc::LibvpxVp8EncoderTemplateAdapter,
+ webrtc::LibvpxVp9EncoderTemplateAdapter,
+ webrtc::OpenH264EncoderTemplateAdapter,
+ webrtc::LibaomAv1EncoderTemplateAdapter>>()),
+ decoder_factory_(std::make_unique<webrtc::VideoDecoderFactoryTemplate<
+ webrtc::LibvpxVp8DecoderTemplateAdapter,
+ webrtc::LibvpxVp9DecoderTemplateAdapter,
+ webrtc::OpenH264DecoderTemplateAdapter,
+ webrtc::Dav1dDecoderTemplateAdapter>>()),
+ config_(config) {}
+
+VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(
+ Config config,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ std::unique_ptr<VideoEncoderFactory> encoder_factory)
+ : encoder_factory_(std::move(encoder_factory)),
+ decoder_factory_(std::move(decoder_factory)),
+ config_(config) {}
+
+VideoCodecTestFixtureImpl::~VideoCodecTestFixtureImpl() = default;
+
+// Processes all frames in the clip and verifies the result.
+void VideoCodecTestFixtureImpl::RunTest(
+ const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds) {
+ RTC_DCHECK(!rate_profiles.empty());
+
+ // To emulate operation on a production VideoStreamEncoder, we call the
+ // codecs on a task queue.
+ TaskQueueForTest task_queue("VidProc TQ");
+
+ bool is_setup_succeeded = SetUpAndInitObjects(
+ &task_queue, rate_profiles[0].target_kbps, rate_profiles[0].input_fps);
+ EXPECT_TRUE(is_setup_succeeded);
+ if (!is_setup_succeeded) {
+ ReleaseAndCloseObjects(&task_queue);
+ return;
+ }
+
+ PrintSettings(&task_queue);
+ ProcessAllFrames(&task_queue, rate_profiles);
+ ReleaseAndCloseObjects(&task_queue);
+
+ AnalyzeAllFrames(rate_profiles, rc_thresholds, quality_thresholds,
+ bs_thresholds);
+}
+
+void VideoCodecTestFixtureImpl::ProcessAllFrames(
+ TaskQueueForTest* task_queue,
+ const std::vector<RateProfile>& rate_profiles) {
+ // Set initial rates.
+ auto rate_profile = rate_profiles.begin();
+ task_queue->PostTask([this, rate_profile] {
+ processor_->SetRates(rate_profile->target_kbps, rate_profile->input_fps);
+ });
+
+ cpu_process_time_->Start();
+
+ for (size_t frame_num = 0; frame_num < config_.num_frames; ++frame_num) {
+ auto next_rate_profile = std::next(rate_profile);
+ if (next_rate_profile != rate_profiles.end() &&
+ frame_num == next_rate_profile->frame_num) {
+ rate_profile = next_rate_profile;
+ task_queue->PostTask([this, rate_profile] {
+ processor_->SetRates(rate_profile->target_kbps,
+ rate_profile->input_fps);
+ });
+ }
+
+ task_queue->PostTask([this] { processor_->ProcessFrame(); });
+
+ if (RunEncodeInRealTime(config_)) {
+ // Roughly pace the frames.
+ const int frame_duration_ms =
+ std::ceil(rtc::kNumMillisecsPerSec / rate_profile->input_fps);
+ SleepMs(frame_duration_ms);
+ }
+ }
+
+ task_queue->PostTask([this] { processor_->Finalize(); });
+
+ // Wait until we know that the last frame has been sent for encode.
+ task_queue->SendTask([] {});
+
+ // Give the VideoProcessor pipeline some time to process the last frame,
+ // and then release the codecs.
+ SleepMs(1 * rtc::kNumMillisecsPerSec);
+ cpu_process_time_->Stop();
+}
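The pacing above is deliberately coarse: rtc::kNumMillisecsPerSec is 1000 and
the per-frame sleep rounds up, so real-time runs drift slightly slower than
nominal. For example:

  // input_fps = 30 -> frame_duration_ms = ceil(1000 / 30.0) = 34 ms
  // 300 frames     -> ~10.2 s of wall clock vs. 10.0 s nominal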
+
+void VideoCodecTestFixtureImpl::AnalyzeAllFrames(
+ const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds) {
+
+ for (size_t rate_profile_idx = 0; rate_profile_idx < rate_profiles.size();
+ ++rate_profile_idx) {
+ const size_t first_frame_num = rate_profiles[rate_profile_idx].frame_num;
+ const size_t last_frame_num =
+ rate_profile_idx + 1 < rate_profiles.size()
+ ? rate_profiles[rate_profile_idx + 1].frame_num - 1
+ : config_.num_frames - 1;
+ RTC_CHECK_GE(last_frame_num, first_frame_num);
+
+ VideoStatistics send_stat = stats_.SliceAndCalcAggregatedVideoStatistic(
+ first_frame_num, last_frame_num);
+ RTC_LOG(LS_INFO) << "==> Send stats";
+ RTC_LOG(LS_INFO) << send_stat.ToString("send_") << "\n";
+
+ std::vector<VideoStatistics> layer_stats =
+ stats_.SliceAndCalcLayerVideoStatistic(first_frame_num, last_frame_num);
+ RTC_LOG(LS_INFO) << "==> Receive stats";
+ for (const auto& layer_stat : layer_stats) {
+ RTC_LOG(LS_INFO) << layer_stat.ToString("recv_") << "\n";
+
+ // For perf dashboard.
+ char modifier_buf[256];
+ rtc::SimpleStringBuilder modifier(modifier_buf);
+ modifier << "_r" << rate_profile_idx << "_sl" << layer_stat.spatial_idx;
+
+ auto PrintResultHelper = [&modifier, this](
+ absl::string_view measurement, double value,
+ Unit unit,
+ absl::string_view non_standard_unit_suffix,
+ ImprovementDirection improvement_direction) {
+ rtc::StringBuilder metric_name(measurement);
+ metric_name << modifier.str() << non_standard_unit_suffix;
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ metric_name.str(), config_.test_name, value, unit,
+ improvement_direction);
+ };
+
+ if (layer_stat.temporal_idx == config_.NumberOfTemporalLayers() - 1) {
+ PrintResultHelper("enc_speed", layer_stat.enc_speed_fps,
+ Unit::kUnitless, /*non_standard_unit_suffix=*/"_fps",
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResultHelper("avg_key_frame_size",
+ layer_stat.avg_key_frame_size_bytes, Unit::kBytes,
+ /*non_standard_unit_suffix=*/"",
+ ImprovementDirection::kNeitherIsBetter);
+ PrintResultHelper("num_key_frames", layer_stat.num_key_frames,
+ Unit::kCount,
+ /*non_standard_unit_suffix=*/"",
+ ImprovementDirection::kNeitherIsBetter);
+ printf("\n");
+ }
+
+ modifier << "tl" << layer_stat.temporal_idx;
+ PrintResultHelper("dec_speed", layer_stat.dec_speed_fps, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"_fps",
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResultHelper("avg_delta_frame_size",
+ layer_stat.avg_delta_frame_size_bytes, Unit::kBytes,
+ /*non_standard_unit_suffix=*/"",
+ ImprovementDirection::kNeitherIsBetter);
+ PrintResultHelper("bitrate", layer_stat.bitrate_kbps,
+ Unit::kKilobitsPerSecond,
+ /*non_standard_unit_suffix=*/"",
+ ImprovementDirection::kNeitherIsBetter);
+ PrintResultHelper("framerate", layer_stat.framerate_fps, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"_fps",
+ ImprovementDirection::kNeitherIsBetter);
+ PrintResultHelper("avg_psnr_y", layer_stat.avg_psnr_y, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"_dB",
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResultHelper("avg_psnr_u", layer_stat.avg_psnr_u, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"_dB",
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResultHelper("avg_psnr_v", layer_stat.avg_psnr_v, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"_dB",
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResultHelper("min_psnr_yuv", layer_stat.min_psnr, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"_dB",
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResultHelper("avg_qp", layer_stat.avg_qp, Unit::kUnitless,
+ /*non_standard_unit_suffix=*/"",
+ ImprovementDirection::kSmallerIsBetter);
+ printf("\n");
+ if (layer_stat.temporal_idx == config_.NumberOfTemporalLayers() - 1) {
+ printf("\n");
+ }
+ }
+
+ const RateControlThresholds* rc_threshold =
+ rc_thresholds ? &(*rc_thresholds)[rate_profile_idx] : nullptr;
+ const QualityThresholds* quality_threshold =
+ quality_thresholds ? &(*quality_thresholds)[rate_profile_idx] : nullptr;
+
+ VerifyVideoStatistic(send_stat, rc_threshold, quality_threshold,
+ bs_thresholds,
+ rate_profiles[rate_profile_idx].target_kbps,
+ rate_profiles[rate_profile_idx].input_fps);
+ }
+
+ if (config_.print_frame_level_stats) {
+ RTC_LOG(LS_INFO) << "==> Frame stats";
+ std::vector<VideoCodecTestStats::FrameStatistics> frame_stats =
+ stats_.GetFrameStatistics();
+ for (const auto& frame_stat : frame_stats) {
+ RTC_LOG(LS_INFO) << frame_stat.ToString();
+ }
+ }
+
+ cpu_process_time_->Print();
+}
+
+void VideoCodecTestFixtureImpl::VerifyVideoStatistic(
+ const VideoStatistics& video_stat,
+ const RateControlThresholds* rc_thresholds,
+ const QualityThresholds* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds,
+ size_t target_bitrate_kbps,
+ double input_framerate_fps) {
+ if (rc_thresholds) {
+ const float bitrate_mismatch_percent =
+ 100 * std::fabs(1.0f * video_stat.bitrate_kbps - target_bitrate_kbps) /
+ target_bitrate_kbps;
+ const float framerate_mismatch_percent =
+ 100 * std::fabs(video_stat.framerate_fps - input_framerate_fps) /
+ input_framerate_fps;
+ EXPECT_LE(bitrate_mismatch_percent,
+ rc_thresholds->max_avg_bitrate_mismatch_percent);
+ EXPECT_LE(video_stat.time_to_reach_target_bitrate_sec,
+ rc_thresholds->max_time_to_reach_target_bitrate_sec);
+ EXPECT_LE(framerate_mismatch_percent,
+ rc_thresholds->max_avg_framerate_mismatch_percent);
+ EXPECT_LE(video_stat.avg_delay_sec,
+ rc_thresholds->max_avg_buffer_level_sec);
+ EXPECT_LE(video_stat.max_key_frame_delay_sec,
+ rc_thresholds->max_max_key_frame_delay_sec);
+ EXPECT_LE(video_stat.max_delta_frame_delay_sec,
+ rc_thresholds->max_max_delta_frame_delay_sec);
+ EXPECT_LE(video_stat.num_spatial_resizes,
+ rc_thresholds->max_num_spatial_resizes);
+ EXPECT_LE(video_stat.num_key_frames, rc_thresholds->max_num_key_frames);
+ }
+
+ if (quality_thresholds) {
+ EXPECT_GT(video_stat.avg_psnr, quality_thresholds->min_avg_psnr);
+ EXPECT_GT(video_stat.min_psnr, quality_thresholds->min_min_psnr);
+
+ // SSIM calculation is not optimized, so it is disabled in real-time mode.
+ if (!config_.encode_in_real_time) {
+ EXPECT_GT(video_stat.avg_ssim, quality_thresholds->min_avg_ssim);
+ EXPECT_GT(video_stat.min_ssim, quality_thresholds->min_min_ssim);
+ }
+ }
+
+ if (bs_thresholds) {
+ EXPECT_LE(video_stat.max_nalu_size_bytes,
+ bs_thresholds->max_max_nalu_size_bytes);
+ }
+}
+
+bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() {
+ SdpVideoFormat encoder_format(CreateSdpVideoFormat(config_));
+ SdpVideoFormat decoder_format = encoder_format;
+
+ // Override encoder and decoder formats with explicitly provided ones.
+ if (config_.encoder_format) {
+ RTC_DCHECK_EQ(config_.encoder_format->name, config_.codec_name);
+ encoder_format = *config_.encoder_format;
+ }
+
+ if (config_.decoder_format) {
+ RTC_DCHECK_EQ(config_.decoder_format->name, config_.codec_name);
+ decoder_format = *config_.decoder_format;
+ }
+
+ encoder_ = encoder_factory_->CreateVideoEncoder(encoder_format);
+ EXPECT_TRUE(encoder_) << "Encoder not successfully created.";
+ if (encoder_ == nullptr) {
+ return false;
+ }
+
+ const size_t num_simulcast_or_spatial_layers = std::max(
+ config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers; ++i) {
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(decoder_format);
+ EXPECT_TRUE(decoder) << "Decoder not successfully created.";
+ if (decoder == nullptr) {
+ return false;
+ }
+ decoders_.push_back(std::move(decoder));
+ }
+
+ return true;
+}
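Note the asymmetry: a single encoder serves all layers, while each simulcast
stream or spatial layer gets its own decoder instance. For the SvcVP9
configuration used later in this patch (one stream, three spatial layers,
with numberOfSimulcastStreams stored as 0 per SetCodecSettings above):

  // num_simulcast_or_spatial_layers = max(0, 3) = 3
  // => decoders_.size() == 3 after a successful call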
+
+void VideoCodecTestFixtureImpl::DestroyEncoderAndDecoder() {
+ decoders_.clear();
+ encoder_.reset();
+}
+
+VideoCodecTestStats& VideoCodecTestFixtureImpl::GetStats() {
+ return stats_;
+}
+
+bool VideoCodecTestFixtureImpl::SetUpAndInitObjects(
+ TaskQueueForTest* task_queue,
+ size_t initial_bitrate_kbps,
+ double initial_framerate_fps) {
+ config_.codec_settings.minBitrate = 0;
+ config_.codec_settings.startBitrate = static_cast<int>(initial_bitrate_kbps);
+ config_.codec_settings.maxFramerate = std::ceil(initial_framerate_fps);
+
+ int clip_width = config_.clip_width.value_or(config_.codec_settings.width);
+ int clip_height = config_.clip_height.value_or(config_.codec_settings.height);
+
+ // Create file objects for quality analysis.
+ source_frame_reader_ = CreateYuvFrameReader(
+ config_.filepath,
+ Resolution({.width = clip_width, .height = clip_height}),
+ YuvFrameReaderImpl::RepeatMode::kPingPong);
+
+ RTC_DCHECK(encoded_frame_writers_.empty());
+ RTC_DCHECK(decoded_frame_writers_.empty());
+
+ stats_.Clear();
+
+ cpu_process_time_ = std::make_unique<CpuProcessTime>(config_);
+
+ bool is_codec_created = false;
+ task_queue->SendTask([this, &is_codec_created]() {
+ is_codec_created = CreateEncoderAndDecoder();
+ });
+
+ if (!is_codec_created) {
+ return false;
+ }
+
+ if (config_.visualization_params.save_encoded_ivf ||
+ config_.visualization_params.save_decoded_y4m) {
+ std::string encoder_name = GetCodecName(task_queue, /*is_encoder=*/true);
+ encoder_name = absl::StrReplaceAll(encoder_name, {{":", ""}, {" ", "-"}});
+
+ const size_t num_simulcast_or_spatial_layers = std::max(
+ config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
+ const size_t num_temporal_layers = config_.NumberOfTemporalLayers();
+ for (size_t simulcast_svc_idx = 0;
+ simulcast_svc_idx < num_simulcast_or_spatial_layers;
+ ++simulcast_svc_idx) {
+ const std::string output_filename_base =
+ JoinFilename(config_.output_path,
+ FilenameWithParams(config_) + "_" + encoder_name +
+ "_sl" + std::to_string(simulcast_svc_idx));
+
+ if (config_.visualization_params.save_encoded_ivf) {
+ for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers;
+ ++temporal_idx) {
+ const std::string output_file_path = output_filename_base + "tl" +
+ std::to_string(temporal_idx) +
+ ".ivf";
+ FileWrapper ivf_file = FileWrapper::OpenWriteOnly(output_file_path);
+
+ const VideoProcessor::LayerKey layer_key(simulcast_svc_idx,
+ temporal_idx);
+ encoded_frame_writers_[layer_key] =
+ IvfFileWriter::Wrap(std::move(ivf_file), /*byte_limit=*/0);
+ }
+ }
+
+ if (config_.visualization_params.save_decoded_y4m) {
+ FrameWriter* decoded_frame_writer = new Y4mFrameWriterImpl(
+ output_filename_base + ".y4m", config_.codec_settings.width,
+ config_.codec_settings.height, config_.codec_settings.maxFramerate);
+ EXPECT_TRUE(decoded_frame_writer->Init());
+ decoded_frame_writers_.push_back(
+ std::unique_ptr<FrameWriter>(decoded_frame_writer));
+ }
+ }
+ }
+
+ task_queue->SendTask(
+ [this]() {
+ processor_ = std::make_unique<VideoProcessor>(
+ encoder_.get(), &decoders_, source_frame_reader_.get(), config_,
+ &stats_, &encoded_frame_writers_,
+ decoded_frame_writers_.empty() ? nullptr : &decoded_frame_writers_);
+ });
+ return true;
+}
+
+void VideoCodecTestFixtureImpl::ReleaseAndCloseObjects(
+ TaskQueueForTest* task_queue) {
+ task_queue->SendTask([this]() {
+ processor_.reset();
+ // The VideoProcessor must be destroyed before the codecs.
+ DestroyEncoderAndDecoder();
+ });
+
+ source_frame_reader_.reset();
+
+ // Close visualization files.
+ for (auto& encoded_frame_writer : encoded_frame_writers_) {
+ EXPECT_TRUE(encoded_frame_writer.second->Close());
+ }
+ encoded_frame_writers_.clear();
+ for (auto& decoded_frame_writer : decoded_frame_writers_) {
+ decoded_frame_writer->Close();
+ }
+ decoded_frame_writers_.clear();
+}
+
+std::string VideoCodecTestFixtureImpl::GetCodecName(
+ TaskQueueForTest* task_queue,
+ bool is_encoder) const {
+ std::string codec_name;
+ task_queue->SendTask([this, is_encoder, &codec_name] {
+ if (is_encoder) {
+ codec_name = encoder_->GetEncoderInfo().implementation_name;
+ } else {
+ codec_name = decoders_.at(0)->ImplementationName();
+ }
+ });
+ return codec_name;
+}
+
+void VideoCodecTestFixtureImpl::PrintSettings(
+ TaskQueueForTest* task_queue) const {
+ RTC_LOG(LS_INFO) << "==> Config";
+ RTC_LOG(LS_INFO) << config_.ToString();
+
+ RTC_LOG(LS_INFO) << "==> Codec names";
+ RTC_LOG(LS_INFO) << "enc_impl_name: "
+ << GetCodecName(task_queue, /*is_encoder=*/true);
+ RTC_LOG(LS_INFO) << "dec_impl_name: "
+ << GetCodecName(task_queue, /*is_encoder=*/false);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h
new file mode 100644
index 0000000000..005b7c0a8e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "modules/video_coding/codecs/test/videoprocessor.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/testsupport/frame_reader.h"
+#include "test/testsupport/frame_writer.h"
+
+namespace webrtc {
+namespace test {
+
+// Integration test for the video processor. It performs rate control and
+// frame quality analysis using the frame statistics collected by the video
+// processor, and logs the results. If thresholds are specified, it checks
+// that the corresponding metrics stay within the desired ranges.
+class VideoCodecTestFixtureImpl : public VideoCodecTestFixture {
+ public:
+ // Verifies that all H.264 keyframes contain SPS/PPS/IDR NALUs.
+ class H264KeyframeChecker : public EncodedFrameChecker {
+ public:
+ void CheckEncodedFrame(webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const override;
+ };
+
+ explicit VideoCodecTestFixtureImpl(Config config);
+ VideoCodecTestFixtureImpl(
+ Config config,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ std::unique_ptr<VideoEncoderFactory> encoder_factory);
+ ~VideoCodecTestFixtureImpl() override;
+
+ void RunTest(const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds) override;
+
+ VideoCodecTestStats& GetStats() override;
+
+ private:
+ class CpuProcessTime;
+
+ bool CreateEncoderAndDecoder();
+ void DestroyEncoderAndDecoder();
+ bool SetUpAndInitObjects(TaskQueueForTest* task_queue,
+ size_t initial_bitrate_kbps,
+ double initial_framerate_fps);
+ void ReleaseAndCloseObjects(TaskQueueForTest* task_queue);
+
+ void ProcessAllFrames(TaskQueueForTest* task_queue,
+ const std::vector<RateProfile>& rate_profiles);
+ void AnalyzeAllFrames(
+ const std::vector<RateProfile>& rate_profiles,
+ const std::vector<RateControlThresholds>* rc_thresholds,
+ const std::vector<QualityThresholds>* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds);
+
+ void VerifyVideoStatistic(
+ const VideoCodecTestStats::VideoStatistics& video_stat,
+ const RateControlThresholds* rc_thresholds,
+ const QualityThresholds* quality_thresholds,
+ const BitstreamThresholds* bs_thresholds,
+ size_t target_bitrate_kbps,
+ double input_framerate_fps);
+
+ std::string GetCodecName(TaskQueueForTest* task_queue, bool is_encoder) const;
+ void PrintSettings(TaskQueueForTest* task_queue) const;
+
+ // Codecs.
+ const std::unique_ptr<VideoEncoderFactory> encoder_factory_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ const std::unique_ptr<VideoDecoderFactory> decoder_factory_;
+ VideoProcessor::VideoDecoderList decoders_;
+
+ // Helper objects.
+ Config config_;
+ VideoCodecTestStatsImpl stats_;
+ std::unique_ptr<FrameReader> source_frame_reader_;
+ VideoProcessor::IvfFileWriterMap encoded_frame_writers_;
+ VideoProcessor::FrameWriterList decoded_frame_writers_;
+ std::unique_ptr<VideoProcessor> processor_;
+ std::unique_ptr<CpuProcessTime> cpu_process_time_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
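A minimal sketch of driving the fixture, mirroring the tests later in this
patch (the RateProfile value {500, 30, 0} means target_kbps, input_fps and
starting frame_num, per the inline comments in those tests):

  VideoCodecTestFixture::Config config;
  config.filename = "foreman_cif";
  config.filepath = ResourcePath(config.filename, "yuv");
  config.num_frames = 100;
  config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
                          352, 288);
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  fixture->RunTest(rate_profiles, /*rc_thresholds=*/nullptr,
                   /*quality_thresholds=*/nullptr, /*bs_thresholds=*/nullptr);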
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
new file mode 100644
index 0000000000..062375bd60
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+using VideoStatistics = VideoCodecTestStats::VideoStatistics;
+
+namespace {
+// Codec settings.
+const int kCifWidth = 352;
+const int kCifHeight = 288;
+const int kNumFramesShort = 100;
+const int kNumFramesLong = 300;
+const size_t kBitrateRdPerfKbps[] = {100, 200, 300, 400, 500, 600,
+ 700, 800, 1000, 1250, 1400, 1600,
+ 1800, 2000, 2200, 2500};
+const size_t kNumFirstFramesToSkipAtRdPerfAnalysis = 60;
+
+class QpFrameChecker : public VideoCodecTestFixture::EncodedFrameChecker {
+ public:
+ void CheckEncodedFrame(webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const override {
+ int qp;
+ if (codec == kVideoCodecVP8) {
+ EXPECT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ } else if (codec == kVideoCodecVP9) {
+ EXPECT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
+ }
+};
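The checker cross-validates the QP the encoder reports against the QP parsed
from the bitstream headers. Wiring it up, exactly as the tests below do, means
keeping the checker alive for the duration of the run and handing the config a
raw pointer:

  const auto frame_checker = std::make_unique<QpFrameChecker>();
  config.encoded_frame_checker = frame_checker.get();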
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFramesLong;
+ config.use_single_core = true;
+ return config;
+}
+
+void PrintRdPerf(
+ const std::map<size_t, std::vector<VideoStatistics>>& rd_stats) {
+ printf("--> Summary\n");
+ printf("%11s %5s %6s %11s %12s %11s %13s %13s %5s %7s %7s %7s %13s %13s\n",
+ "uplink_kbps", "width", "height", "spatial_idx", "temporal_idx",
+ "target_kbps", "downlink_kbps", "framerate_fps", "psnr", "psnr_y",
+ "psnr_u", "psnr_v", "enc_speed_fps", "dec_speed_fps");
+ for (const auto& rd_stat : rd_stats) {
+ const size_t bitrate_kbps = rd_stat.first;
+ for (const auto& layer_stat : rd_stat.second) {
+ printf(
+ "%11zu %5zu %6zu %11zu %12zu %11zu %13zu %13.2f %5.2f %7.2f %7.2f "
+ "%7.2f"
+ "%13.2f %13.2f\n",
+ bitrate_kbps, layer_stat.width, layer_stat.height,
+ layer_stat.spatial_idx, layer_stat.temporal_idx,
+ layer_stat.target_bitrate_kbps, layer_stat.bitrate_kbps,
+ layer_stat.framerate_fps, layer_stat.avg_psnr, layer_stat.avg_psnr_y,
+ layer_stat.avg_psnr_u, layer_stat.avg_psnr_v,
+ layer_stat.enc_speed_fps, layer_stat.dec_speed_fps);
+ }
+ }
+}
+} // namespace
+
+#if defined(RTC_ENABLE_VP9)
+TEST(VideoCodecTestLibvpx, HighBitrateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ config.num_frames = kNumFramesShort;
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 36, 0.94, 0.92}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, ChangeBitrateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {200, 30, 0}, // target_kbps, input_fps, frame_num
+ {700, 30, 100},
+ {500, 30, 200}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 2, 0, 1, 0.5, 0.1, 0, 1},
+ {15, 3, 0, 1, 0.5, 0.1, 0, 0},
+ {11, 2, 0, 1, 0.5, 0.1, 0, 0}};
+
+ std::vector<QualityThresholds> quality_thresholds = {
+ {34, 33, 0.90, 0.88}, {38, 35, 0.95, 0.91}, {35, 34, 0.93, 0.90}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, ChangeFramerateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {100, 24, 0}, // target_kbps, input_fps, frame_num
+ {100, 15, 100},
+ {100, 10, 200}};
+
+ // Framerate mismatch should be lower for lower framerate.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 2, 40, 1, 0.5, 0.2, 0, 1},
+ {8, 2, 5, 1, 0.5, 0.2, 0, 0},
+ {5, 2, 0, 1, 0.5, 0.3, 0, 0}};
+
+ // Quality should be higher for lower framerates for the same content.
+ std::vector<QualityThresholds> quality_thresholds = {
+ {33, 32, 0.88, 0.86}, {33.5, 32, 0.90, 0.86}, {33.5, 31.5, 0.90, 0.85}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, DenoiserOnVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ config.num_frames = kNumFramesShort;
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.3, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37.5, 36, 0.94, 0.93}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, VeryLowBitrateVP9) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, true,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{50, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {15, 3, 75, 1, 0.5, 0.4, 2, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{28, 25, 0.80, 0.65}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+// TODO(marpan): Add a temporal layer test for VP9 once the necessary changes
+// are in the VP9 wrapper.
+
+#endif // defined(RTC_ENABLE_VP9)
+
+TEST(VideoCodecTestLibvpx, HighBitrateVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ config.num_frames = kNumFramesShort;
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.2, 0.1, 0, 1}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {{35, 33, 0.91, 0.89}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ChangeBitrateVP8 DISABLED_ChangeBitrateVP8
+#else
+#define MAYBE_ChangeBitrateVP8 ChangeBitrateVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {200, 30, 0}, // target_kbps, input_fps, frame_num
+ {800, 30, 100},
+ {500, 30, 200}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.2, 0.1, 0, 1},
+ {15.5, 1, 0, 1, 0.2, 0.1, 0, 0},
+ {15, 1, 0, 1, 0.2, 0.1, 0, 0}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {
+ {31.8, 31, 0.86, 0.85}, {36, 34.8, 0.92, 0.90}, {33.5, 32, 0.90, 0.88}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {
+ {33, 32, 0.89, 0.88}, {38, 36, 0.94, 0.93}, {35, 34, 0.92, 0.91}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ChangeFramerateVP8 DISABLED_ChangeFramerateVP8
+#else
+#define MAYBE_ChangeFramerateVP8 ChangeFramerateVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {
+ {80, 24, 0}, // target_kbps, input_fps, frame_index_rate_update
+ {80, 15, 100},
+ {80, 10, 200}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 2.42, 60, 1, 0.3, 0.3, 0, 1},
+ {10, 2, 30, 1, 0.3, 0.3, 0, 0},
+ {10, 2, 10, 1, 0.3, 0.2, 0, 0}};
+#else
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 2, 20, 1, 0.3, 0.15, 0, 1},
+ {5, 2, 5, 1, 0.3, 0.15, 0, 0},
+ {4, 2, 1, 1, 0.3, 0.2, 0, 0}};
+#endif
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {
+ {31, 30, 0.85, 0.84}, {31.4, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {
+ {31, 30, 0.87, 0.85}, {32, 31, 0.88, 0.85}, {32, 30, 0.87, 0.82}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_TemporalLayersVP8 DISABLED_TemporalLayersVP8
+#else
+#define MAYBE_TemporalLayersVP8 TemporalLayersVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_TemporalLayersVP8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 3, true, true, false,
+ kCifWidth, kCifHeight);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{200, 30, 0}, {400, 30, 150}};
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 1, 2.1, 1, 0.2, 0.1, 0, 1}, {12, 2, 3, 1, 0.2, 0.1, 0, 1}};
+#else
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 1, 0.2, 0.1, 0, 1}, {10, 2, 0, 1, 0.2, 0.1, 0, 1}};
+#endif
+// Min SSIM drops because of a high-motion scene with a complex background
+// (trees).
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<QualityThresholds> quality_thresholds = {{31, 30, 0.85, 0.83},
+ {31, 28, 0.85, 0.75}};
+#else
+ std::vector<QualityThresholds> quality_thresholds = {{32, 30, 0.88, 0.85},
+ {33, 30, 0.89, 0.83}};
+#endif
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_MultiresVP8 DISABLED_MultiresVP8
+#else
+#define MAYBE_MultiresVP8 MultiresVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_MultiresVP8) {
+ auto config = CreateConfig();
+ config.filename = "ConferenceMotion_1280_720_50";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 100;
+ config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {4.1, 1.04, 7, 0.18, 0.14, 0.08, 0, 1}};
+#else
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 5, 1, 0.3, 0.1, 0, 1}};
+#endif
+ std::vector<QualityThresholds> quality_thresholds = {{34, 32, 0.90, 0.88}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SimulcastVP8 DISABLED_SimulcastVP8
+#else
+#define MAYBE_SimulcastVP8 SimulcastVP8
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_SimulcastVP8) {
+ auto config = CreateConfig();
+ config.filename = "ConferenceMotion_1280_720_50";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 100;
+ config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+
+ InternalEncoderFactory internal_encoder_factory;
+ std::unique_ptr<VideoEncoderFactory> adapted_encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>([&]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat(cricket::kVp8CodecName));
+ });
+ std::unique_ptr<InternalDecoderFactory> internal_decoder_factory(
+ new InternalDecoderFactory());
+
+ auto fixture =
+ CreateVideoCodecTestFixture(config, std::move(internal_decoder_factory),
+ std::move(adapted_encoder_factory));
+
+ std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {20, 5, 90, 1, 0.5, 0.3, 0, 1}};
+ std::vector<QualityThresholds> quality_thresholds = {{34, 32, 0.90, 0.88}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SvcVP9 DISABLED_SvcVP9
+#else
+#define MAYBE_SvcVP9 SvcVP9
+#endif
+TEST(VideoCodecTestLibvpx, MAYBE_SvcVP9) {
+ auto config = CreateConfig();
+ config.filename = "ConferenceMotion_1280_720_50";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 100;
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 3, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 5, 1, 0.3, 0.1, 0, 1}};
+ std::vector<QualityThresholds> quality_thresholds = {{36, 34, 0.93, 0.90}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestLibvpx, DISABLED_MultiresVP8RdPerf) {
+ auto config = CreateConfig();
+ config.filename = "FourPeople_1280x720_30";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 300;
+ config.print_frame_level_stats = true;
+ config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::map<size_t, std::vector<VideoStatistics>> rd_stats;
+ for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
+ std::vector<RateProfile> rate_profiles = {{bitrate_kbps, 30, 0}};
+
+ fixture->RunTest(rate_profiles, nullptr, nullptr, nullptr);
+
+ rd_stats[bitrate_kbps] =
+ fixture->GetStats().SliceAndCalcLayerVideoStatistic(
+ kNumFirstFramesToSkipAtRdPerfAnalysis, config.num_frames - 1);
+ }
+
+ PrintRdPerf(rd_stats);
+}
+
+TEST(VideoCodecTestLibvpx, DISABLED_SvcVP9RdPerf) {
+ auto config = CreateConfig();
+ config.filename = "FourPeople_1280x720_30";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = 300;
+ config.print_frame_level_stats = true;
+ config.SetCodecSettings(cricket::kVp9CodecName, 1, 3, 3, true, true, false,
+ 1280, 720);
+ const auto frame_checker = std::make_unique<QpFrameChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::map<size_t, std::vector<VideoStatistics>> rd_stats;
+ for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
+ std::vector<RateProfile> rate_profiles = {{bitrate_kbps, 30, 0}};
+
+ fixture->RunTest(rate_profiles, nullptr, nullptr, nullptr);
+
+ rd_stats[bitrate_kbps] =
+ fixture->GetStats().SliceAndCalcLayerVideoStatistic(
+ kNumFirstFramesToSkipAtRdPerfAnalysis, config.num_frames - 1);
+ }
+
+ PrintRdPerf(rd_stats);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
new file mode 100644
index 0000000000..fce21544b4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kForemanNumFrames = 300;
+const int kForemanFramerateFps = 30;
+
+struct RateProfileData {
+ std::string name;
+ std::vector<webrtc::test::RateProfile> rate_profile;
+};
+
+const size_t kConstRateIntervalSec = 10;
+
+const RateProfileData kBitRateHighLowHigh = {
+ /*name=*/"BitRateHighLowHigh",
+ /*rate_profile=*/{
+ {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/0},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/300},
+ {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/600},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/900},
+ {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/1200}}};
+
+const RateProfileData kBitRateLowHighLow = {
+ /*name=*/"BitRateLowHighLow",
+ /*rate_profile=*/{
+ {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/0},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/300},
+ {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/600},
+ {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/900},
+ {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/1200}}};
+
+const RateProfileData kFrameRateHighLowHigh = {
+ /*name=*/"FrameRateHighLowHigh",
+ /*rate_profile=*/{
+ {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/0},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/300},
+ {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/450},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/525},
+ {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/675}}};
+
+const RateProfileData kFrameRateLowHighLow = {
+ /*name=*/"FrameRateLowHighLow",
+ /*rate_profile=*/{
+ {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/0},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/75},
+ {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/225},
+ {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/525},
+ {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/775}}};
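+
+// Each rate step is nominally held for kConstRateIntervalSec, i.e. for
+// input_fps * 10 frames at that step's framerate; the rate adaptation test
+// below slices its per-step statistics accordingly.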
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kForemanNumFrames;
+  // Encode in real time so as not to overwhelm the OpenMAX buffers in the
+  // Android MediaCodec.
+ config.encode_in_real_time = true;
+ return config;
+}
+
+std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
+ VideoCodecTestFixture::Config config) {
+ InitializeAndroidObjects(); // Idempotent.
+ auto encoder_factory = CreateAndroidEncoderFactory();
+ auto decoder_factory = CreateAndroidDecoderFactory();
+ return CreateVideoCodecTestFixture(config, std::move(decoder_factory),
+ std::move(encoder_factory));
+}
+} // namespace
+
+TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsVp8) {
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, kForemanFramerateFps, 0}};
+
+ // The thresholds below may have to be tweaked to let even poor MediaCodec
+ // implementations pass. If this test fails on the bots, disable it and
+ // ping brandtr@.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 1, 1, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{36, 31, 0.92, 0.86}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsH264CBP) {
+ auto config = CreateConfig();
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ config.encoded_frame_checker = frame_checker.get();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, kForemanFramerateFps, 0}};
+
+ // The thresholds below may have to be tweaked to let even poor MediaCodec
+ // implementations pass. If this test fails on the bots, disable it and
+ // ping brandtr@.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {10, 1, 1, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{36, 31, 0.92, 0.86}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+// TODO(brandtr): Enable this test when we have trybots/buildbots with
+// HW encoders that support CHP.
+TEST(VideoCodecTestMediaCodec, DISABLED_ForemanCif500kbpsH264CHP) {
+ auto config = CreateConfig();
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+
+ config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
+ config.encoded_frame_checker = frame_checker.get();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, kForemanFramerateFps, 0}};
+
+ // The thresholds below may have to be tweaked to let even poor MediaCodec
+ // implementations pass. If this test fails on the bots, disable it and
+ // ping brandtr@.
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+TEST(VideoCodecTestMediaCodec, ForemanMixedRes100kbpsVp8H264) {
+ auto config = CreateConfig();
+ const int kNumFrames = 30;
+ const std::vector<std::string> codecs = {cricket::kVp8CodecName,
+ cricket::kH264CodecName};
+ const std::vector<std::tuple<int, int>> resolutions = {
+ {128, 96}, {176, 144}, {320, 240}, {480, 272}};
+ const std::vector<RateProfile> rate_profiles = {
+ {100, kForemanFramerateFps, 0}};
+ const std::vector<QualityThresholds> quality_thresholds = {
+ {29, 26, 0.8, 0.75}};
+
+ for (const auto& codec : codecs) {
+ for (const auto& resolution : resolutions) {
+ const int width = std::get<0>(resolution);
+ const int height = std::get<1>(resolution);
+ config.filename = std::string("foreman_") + std::to_string(width) + "x" +
+ std::to_string(height);
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFrames;
+ config.SetCodecSettings(codec, 1, 1, 1, false, false, false, width,
+ height);
+
+ auto fixture = CreateTestFixtureWithConfig(config);
+ fixture->RunTest(rate_profiles, nullptr /* rc_thresholds */,
+ &quality_thresholds, nullptr /* bs_thresholds */);
+ }
+ }
+}
+
+class VideoCodecTestMediaCodecRateAdaptation
+ : public ::testing::TestWithParam<
+ std::tuple<RateProfileData, std::string>> {
+ public:
+ static std::string ParamInfoToStr(
+ const ::testing::TestParamInfo<
+ VideoCodecTestMediaCodecRateAdaptation::ParamType>& info) {
+ char buf[512];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << std::get<0>(info.param).name << "_" << std::get<1>(info.param);
+ return ss.str();
+ }
+};
+
+TEST_P(VideoCodecTestMediaCodecRateAdaptation, DISABLED_RateAdaptation) {
+ const std::vector<webrtc::test::RateProfile> rate_profile =
+ std::get<0>(GetParam()).rate_profile;
+ const std::string codec_name = std::get<1>(GetParam());
+
+ VideoCodecTestFixture::Config config;
+ config.filename = "FourPeople_1280x720_30";
+ config.filepath = ResourcePath(config.filename, "yuv");
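+  // Total clip length: the final rate step starts at
+  // rate_profile.back().frame_num and runs for kConstRateIntervalSec; e.g.
+  // for kBitRateHighLowHigh this gives 1200 + 10 * 30 = 1500 frames.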
+ config.num_frames = rate_profile.back().frame_num +
+ static_cast<size_t>(kConstRateIntervalSec *
+ rate_profile.back().input_fps);
+ config.encode_in_real_time = true;
+ config.SetCodecSettings(codec_name, 1, 1, 1, false, false, false, 1280, 720);
+
+ auto fixture = CreateTestFixtureWithConfig(config);
+ fixture->RunTest(rate_profile, nullptr, nullptr, nullptr);
+
+ for (size_t i = 0; i < rate_profile.size(); ++i) {
+ const size_t num_frames =
+ static_cast<size_t>(rate_profile[i].input_fps * kConstRateIntervalSec);
+
+ auto stats = fixture->GetStats().SliceAndCalcLayerVideoStatistic(
+ rate_profile[i].frame_num, rate_profile[i].frame_num + num_frames - 1);
+ ASSERT_EQ(stats.size(), 1u);
+
+ // Bitrate mismatch is <= 10%.
+ EXPECT_LE(stats[0].avg_bitrate_mismatch_pct, 10);
+ EXPECT_GE(stats[0].avg_bitrate_mismatch_pct, -10);
+
+    // Average frame transmission delay and processing latency are
+    // <= 100..250 ms, depending on the frame rate.
+ const double expected_delay_sec =
+ std::min(std::max(1 / rate_profile[i].input_fps, 0.1), 0.25);
+ EXPECT_LE(stats[0].avg_delay_sec, expected_delay_sec);
+ EXPECT_LE(stats[0].avg_encode_latency_sec, expected_delay_sec);
+ EXPECT_LE(stats[0].avg_decode_latency_sec, expected_delay_sec);
+
+ // Frame drops are not expected.
+ EXPECT_EQ(stats[0].num_encoded_frames, num_frames);
+ EXPECT_EQ(stats[0].num_decoded_frames, num_frames);
+
+ // Periodic keyframes are not expected.
+ EXPECT_EQ(stats[0].num_key_frames, i == 0 ? 1u : 0);
+
+    // Ensure the codec delivers reasonable spatial quality.
+ EXPECT_GE(stats[0].avg_psnr_y, 35);
+ }
+}
+
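+// Combine() instantiates one test per (rate profile, codec) pair, i.e.
+// 4 * 3 = 12 parameterizations, named via ParamInfoToStr.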
+INSTANTIATE_TEST_SUITE_P(
+ RateAdaptation,
+ VideoCodecTestMediaCodecRateAdaptation,
+ ::testing::Combine(::testing::Values(kBitRateLowHighLow,
+ kBitRateHighLowHigh,
+ kFrameRateLowHighLow,
+ kFrameRateHighLowHigh),
+ ::testing::Values(cricket::kVp8CodecName,
+ cricket::kVp9CodecName,
+ cricket::kH264CodecName)),
+ VideoCodecTestMediaCodecRateAdaptation::ParamInfoToStr);
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc
new file mode 100644
index 0000000000..6513074bad
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_openh264.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Codec settings.
+const int kCifWidth = 352;
+const int kCifHeight = 288;
+const int kNumFrames = 100;
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kNumFrames;
+  // Only allow the encoder/decoder to use a single core, for predictability.
+ config.use_single_core = true;
+ return config;
+}
+} // namespace
+
+TEST(VideoCodecTestOpenH264, ConstantHighBitrate) {
+ auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
+}
+
+// H264: Enable SingleNalUnit packetization mode. The encoder should split
+// large frames into multiple slices and limit the length of NAL units.
+TEST(VideoCodecTestOpenH264, SingleNalUnit) {
+ auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.h264_codec_settings.packetization_mode =
+ H264PacketizationMode::SingleNalUnit;
+ config.max_payload_size_bytes = 500;
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
+ kCifWidth, kCifHeight);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateVideoCodecTestFixture(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<RateControlThresholds> rc_thresholds = {
+ {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{37, 35, 0.93, 0.91}};
+
+ BitstreamThresholds bs_thresholds = {config.max_payload_size_bytes};
+
+ fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds,
+ &bs_thresholds);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
new file mode 100644
index 0000000000..390348b97a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include <limits>
+#include <numeric>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/running_statistics.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace test {
+
+using FrameStatistics = VideoCodecTestStats::FrameStatistics;
+using VideoStatistics = VideoCodecTestStats::VideoStatistics;
+
+namespace {
+const int kMaxBitrateMismatchPercent = 20;
+}
+
+VideoCodecTestStatsImpl::VideoCodecTestStatsImpl() = default;
+VideoCodecTestStatsImpl::~VideoCodecTestStatsImpl() = default;
+
+void VideoCodecTestStatsImpl::AddFrame(const FrameStatistics& frame_stat) {
+ const size_t timestamp = frame_stat.rtp_timestamp;
+ const size_t layer_idx = frame_stat.spatial_idx;
+ RTC_DCHECK(rtp_timestamp_to_frame_num_[layer_idx].find(timestamp) ==
+ rtp_timestamp_to_frame_num_[layer_idx].end());
+ rtp_timestamp_to_frame_num_[layer_idx][timestamp] = frame_stat.frame_number;
+ layer_stats_[layer_idx].push_back(frame_stat);
+}
+
+FrameStatistics* VideoCodecTestStatsImpl::GetFrame(size_t frame_num,
+ size_t layer_idx) {
+ RTC_CHECK_LT(frame_num, layer_stats_[layer_idx].size());
+ return &layer_stats_[layer_idx][frame_num];
+}
+
+FrameStatistics* VideoCodecTestStatsImpl::GetFrameWithTimestamp(
+ size_t timestamp,
+ size_t layer_idx) {
+ RTC_DCHECK(rtp_timestamp_to_frame_num_[layer_idx].find(timestamp) !=
+ rtp_timestamp_to_frame_num_[layer_idx].end());
+
+ return GetFrame(rtp_timestamp_to_frame_num_[layer_idx][timestamp], layer_idx);
+}
+
+FrameStatistics* VideoCodecTestStatsImpl::GetOrAddFrame(size_t timestamp_rtp,
+ size_t spatial_idx) {
+ if (rtp_timestamp_to_frame_num_[spatial_idx].count(timestamp_rtp) > 0) {
+ return GetFrameWithTimestamp(timestamp_rtp, spatial_idx);
+ }
+
+ size_t frame_num = layer_stats_[spatial_idx].size();
+ AddFrame(FrameStatistics(frame_num, timestamp_rtp, spatial_idx));
+
+ return GetFrameWithTimestamp(timestamp_rtp, spatial_idx);
+}
+
+std::vector<FrameStatistics> VideoCodecTestStatsImpl::GetFrameStatistics()
+ const {
+ size_t capacity = 0;
+ for (const auto& layer_stat : layer_stats_) {
+ capacity += layer_stat.second.size();
+ }
+
+ std::vector<FrameStatistics> frame_statistics;
+ frame_statistics.reserve(capacity);
+ for (const auto& layer_stat : layer_stats_) {
+ std::copy(layer_stat.second.cbegin(), layer_stat.second.cend(),
+ std::back_inserter(frame_statistics));
+ }
+
+ return frame_statistics;
+}
+
+std::vector<VideoStatistics>
+VideoCodecTestStatsImpl::SliceAndCalcLayerVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num) {
+ std::vector<VideoStatistics> layer_stats;
+
+ size_t num_spatial_layers = 0;
+ size_t num_temporal_layers = 0;
+ GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
+ &num_temporal_layers);
+ RTC_CHECK_GT(num_spatial_layers, 0);
+ RTC_CHECK_GT(num_temporal_layers, 0);
+
+ for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers;
+ ++spatial_idx) {
+ for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers;
+ ++temporal_idx) {
+ VideoStatistics layer_stat = SliceAndCalcVideoStatistic(
+ first_frame_num, last_frame_num, spatial_idx, temporal_idx, false,
+ /*target_bitrate=*/absl::nullopt, /*target_framerate=*/absl::nullopt);
+ layer_stats.push_back(layer_stat);
+ }
+ }
+
+ return layer_stats;
+}
+
+VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcAggregatedVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num) {
+ size_t num_spatial_layers = 0;
+ size_t num_temporal_layers = 0;
+ GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
+ &num_temporal_layers);
+ RTC_CHECK_GT(num_spatial_layers, 0);
+ RTC_CHECK_GT(num_temporal_layers, 0);
+
+ return SliceAndCalcVideoStatistic(
+ first_frame_num, last_frame_num, num_spatial_layers - 1,
+ num_temporal_layers - 1, true, /*target_bitrate=*/absl::nullopt,
+ /*target_framerate=*/absl::nullopt);
+}
+
+VideoStatistics VideoCodecTestStatsImpl::CalcVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ DataRate target_bitrate,
+ Frequency target_framerate) {
+ size_t num_spatial_layers = 0;
+ size_t num_temporal_layers = 0;
+ GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
+ &num_temporal_layers);
+ return SliceAndCalcVideoStatistic(
+ first_frame_num, last_frame_num, num_spatial_layers - 1,
+ num_temporal_layers - 1, true, target_bitrate, target_framerate);
+}
+
+size_t VideoCodecTestStatsImpl::Size(size_t spatial_idx) {
+ return layer_stats_[spatial_idx].size();
+}
+
+void VideoCodecTestStatsImpl::Clear() {
+ layer_stats_.clear();
+ rtp_timestamp_to_frame_num_.clear();
+}
+
+FrameStatistics VideoCodecTestStatsImpl::AggregateFrameStatistic(
+ size_t frame_num,
+ size_t spatial_idx,
+ bool aggregate_independent_layers) {
+ FrameStatistics frame_stat = *GetFrame(frame_num, spatial_idx);
+ bool inter_layer_predicted = frame_stat.inter_layer_predicted;
+ while (spatial_idx-- > 0) {
+ if (aggregate_independent_layers || inter_layer_predicted) {
+ FrameStatistics* base_frame_stat = GetFrame(frame_num, spatial_idx);
+ frame_stat.length_bytes += base_frame_stat->length_bytes;
+ frame_stat.target_bitrate_kbps += base_frame_stat->target_bitrate_kbps;
+
+ inter_layer_predicted = base_frame_stat->inter_layer_predicted;
+ }
+ }
+
+ return frame_stat;
+}
+
+size_t VideoCodecTestStatsImpl::CalcLayerTargetBitrateKbps(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers) {
+ size_t target_bitrate_kbps = 0;
+
+  // Because of possible frame drops, we don't know whether a superframe
+  // includes all required spatial layers. Run through all frames in the
+  // specified range and return the maximum target bitrate found. Assume that
+  // the target bitrate in the frame statistics is specified per temporal
+  // layer.
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
+ ++frame_num) {
+ FrameStatistics superframe = AggregateFrameStatistic(
+ frame_num, spatial_idx, aggregate_independent_layers);
+
+ if (superframe.temporal_idx <= temporal_idx) {
+ target_bitrate_kbps =
+ std::max(target_bitrate_kbps, superframe.target_bitrate_kbps);
+ }
+ }
+
+ RTC_DCHECK_GT(target_bitrate_kbps, 0);
+ return target_bitrate_kbps;
+}
+
+VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers,
+ absl::optional<DataRate> target_bitrate,
+ absl::optional<Frequency> target_framerate) {
+ VideoStatistics video_stat;
+
+ float buffer_level_bits = 0.0f;
+ webrtc_impl::RunningStatistics<float> buffer_level_sec;
+
+ webrtc_impl::RunningStatistics<size_t> key_frame_size_bytes;
+ webrtc_impl::RunningStatistics<size_t> delta_frame_size_bytes;
+
+ webrtc_impl::RunningStatistics<size_t> frame_encoding_time_us;
+ webrtc_impl::RunningStatistics<size_t> frame_decoding_time_us;
+
+ webrtc_impl::RunningStatistics<float> psnr_y;
+ webrtc_impl::RunningStatistics<float> psnr_u;
+ webrtc_impl::RunningStatistics<float> psnr_v;
+ webrtc_impl::RunningStatistics<float> psnr;
+ webrtc_impl::RunningStatistics<float> ssim;
+ webrtc_impl::RunningStatistics<int> qp;
+
+ size_t rtp_timestamp_first_frame = 0;
+ size_t rtp_timestamp_prev_frame = 0;
+
+ FrameStatistics last_successfully_decoded_frame(0, 0, 0);
+
+ const size_t target_bitrate_kbps =
+ target_bitrate.has_value()
+ ? target_bitrate->kbps()
+ : CalcLayerTargetBitrateKbps(first_frame_num, last_frame_num,
+ spatial_idx, temporal_idx,
+ aggregate_independent_layers);
+ const size_t target_bitrate_bps = 1000 * target_bitrate_kbps;
+ RTC_CHECK_GT(target_bitrate_kbps, 0); // We divide by `target_bitrate_kbps`.
+
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
+ ++frame_num) {
+ FrameStatistics frame_stat = AggregateFrameStatistic(
+ frame_num, spatial_idx, aggregate_independent_layers);
+
+ float time_since_first_frame_sec =
+ 1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_first_frame) /
+ kVideoPayloadTypeFrequency;
+ float time_since_prev_frame_sec =
+ 1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_prev_frame) /
+ kVideoPayloadTypeFrequency;
+
+ if (frame_stat.temporal_idx > temporal_idx) {
+ continue;
+ }
+
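+    // Leaky-bucket model: the buffer drains at the target bitrate and fills
+    // with each encoded frame. The level divided by the target bitrate is
+    // the queuing delay, in seconds, that a link running at exactly the
+    // target rate would add.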
+ buffer_level_bits -= time_since_prev_frame_sec * 1000 * target_bitrate_kbps;
+ buffer_level_bits = std::max(0.0f, buffer_level_bits);
+ buffer_level_bits += 8.0 * frame_stat.length_bytes;
+ buffer_level_sec.AddSample(buffer_level_bits /
+ (1000 * target_bitrate_kbps));
+
+ video_stat.length_bytes += frame_stat.length_bytes;
+
+ if (frame_stat.encoding_successful) {
+ ++video_stat.num_encoded_frames;
+
+ if (frame_stat.frame_type == VideoFrameType::kVideoFrameKey) {
+ key_frame_size_bytes.AddSample(frame_stat.length_bytes);
+ ++video_stat.num_key_frames;
+ } else {
+ delta_frame_size_bytes.AddSample(frame_stat.length_bytes);
+ }
+
+ frame_encoding_time_us.AddSample(frame_stat.encode_time_us);
+ qp.AddSample(frame_stat.qp);
+
+ video_stat.max_nalu_size_bytes = std::max(video_stat.max_nalu_size_bytes,
+ frame_stat.max_nalu_size_bytes);
+ }
+
+ if (frame_stat.decoding_successful) {
+ ++video_stat.num_decoded_frames;
+
+ video_stat.width = std::max(video_stat.width, frame_stat.decoded_width);
+ video_stat.height =
+ std::max(video_stat.height, frame_stat.decoded_height);
+
+ if (video_stat.num_decoded_frames > 1) {
+ if (last_successfully_decoded_frame.decoded_width !=
+ frame_stat.decoded_width ||
+ last_successfully_decoded_frame.decoded_height !=
+ frame_stat.decoded_height) {
+ ++video_stat.num_spatial_resizes;
+ }
+ }
+
+ frame_decoding_time_us.AddSample(frame_stat.decode_time_us);
+ last_successfully_decoded_frame = frame_stat;
+ }
+
+ if (frame_stat.quality_analysis_successful) {
+ psnr_y.AddSample(frame_stat.psnr_y);
+ psnr_u.AddSample(frame_stat.psnr_u);
+ psnr_v.AddSample(frame_stat.psnr_v);
+ psnr.AddSample(frame_stat.psnr);
+ ssim.AddSample(frame_stat.ssim);
+ }
+
+ if (video_stat.num_input_frames > 0) {
+ if (video_stat.time_to_reach_target_bitrate_sec == 0.0f) {
+ RTC_CHECK_GT(time_since_first_frame_sec, 0);
+ const float curr_kbps =
+ 8.0 * video_stat.length_bytes / 1000 / time_since_first_frame_sec;
+ const float bitrate_mismatch_percent =
+ 100 * std::fabs(curr_kbps - target_bitrate_kbps) /
+ target_bitrate_kbps;
+ if (bitrate_mismatch_percent < kMaxBitrateMismatchPercent) {
+ video_stat.time_to_reach_target_bitrate_sec =
+ time_since_first_frame_sec;
+ }
+ }
+ }
+
+ rtp_timestamp_prev_frame = frame_stat.rtp_timestamp;
+ if (video_stat.num_input_frames == 0) {
+ rtp_timestamp_first_frame = frame_stat.rtp_timestamp;
+ }
+
+ ++video_stat.num_input_frames;
+ }
+
+ const size_t num_frames = last_frame_num - first_frame_num + 1;
+ const size_t timestamp_delta =
+ GetFrame(first_frame_num + 1, spatial_idx)->rtp_timestamp -
+ GetFrame(first_frame_num, spatial_idx)->rtp_timestamp;
+ RTC_CHECK_GT(timestamp_delta, 0);
+ const float input_framerate_fps =
+ target_framerate.has_value()
+ ? target_framerate->millihertz() / 1000.0
+ : 1.0 * kVideoPayloadTypeFrequency / timestamp_delta;
+ RTC_CHECK_GT(input_framerate_fps, 0);
+ const float duration_sec = num_frames / input_framerate_fps;
+
+ video_stat.target_bitrate_kbps = target_bitrate_kbps;
+ video_stat.input_framerate_fps = input_framerate_fps;
+
+ video_stat.spatial_idx = spatial_idx;
+ video_stat.temporal_idx = temporal_idx;
+
+ RTC_CHECK_GT(duration_sec, 0);
+ const float bitrate_bps = 8 * video_stat.length_bytes / duration_sec;
+ video_stat.bitrate_kbps = static_cast<size_t>((bitrate_bps + 500) / 1000);
+ video_stat.framerate_fps = video_stat.num_encoded_frames / duration_sec;
+
+  // http://bugs.webrtc.org/10400: On Windows, we only get millisecond
+  // granularity in the frame encode/decode timing measurements, so we need
+  // to gracefully avoid a div-by-zero here.
+ const float mean_encode_time_us =
+ frame_encoding_time_us.GetMean().value_or(0);
+ video_stat.enc_speed_fps = mean_encode_time_us > 0.0f
+ ? 1000000.0f / mean_encode_time_us
+ : std::numeric_limits<float>::max();
+ const float mean_decode_time_us =
+ frame_decoding_time_us.GetMean().value_or(0);
+ video_stat.dec_speed_fps = mean_decode_time_us > 0.0f
+ ? 1000000.0f / mean_decode_time_us
+ : std::numeric_limits<float>::max();
+
+ video_stat.avg_encode_latency_sec =
+ frame_encoding_time_us.GetMean().value_or(0) / 1000000.0f;
+ video_stat.max_encode_latency_sec =
+ frame_encoding_time_us.GetMax().value_or(0) / 1000000.0f;
+
+ video_stat.avg_decode_latency_sec =
+ frame_decoding_time_us.GetMean().value_or(0) / 1000000.0f;
+ video_stat.max_decode_latency_sec =
+ frame_decoding_time_us.GetMax().value_or(0) / 1000000.0f;
+
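+  // The largest frame, in kilobits (bytes * 8 / 1000), divided by the target
+  // bitrate in kbps gives the worst-case time, in seconds, needed to drain
+  // that frame at the target rate.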
+ auto MaxDelaySec = [target_bitrate_kbps](
+ const webrtc_impl::RunningStatistics<size_t>& stats) {
+ return 8 * stats.GetMax().value_or(0) / 1000 / target_bitrate_kbps;
+ };
+
+ video_stat.avg_delay_sec = buffer_level_sec.GetMean().value_or(0);
+ video_stat.max_key_frame_delay_sec = MaxDelaySec(key_frame_size_bytes);
+ video_stat.max_delta_frame_delay_sec = MaxDelaySec(delta_frame_size_bytes);
+
+ video_stat.avg_bitrate_mismatch_pct =
+ 100 * (bitrate_bps - target_bitrate_bps) / target_bitrate_bps;
+ video_stat.avg_framerate_mismatch_pct =
+ 100 * (video_stat.framerate_fps - input_framerate_fps) /
+ input_framerate_fps;
+
+ video_stat.avg_key_frame_size_bytes =
+ key_frame_size_bytes.GetMean().value_or(0);
+ video_stat.avg_delta_frame_size_bytes =
+ delta_frame_size_bytes.GetMean().value_or(0);
+ video_stat.avg_qp = qp.GetMean().value_or(0);
+
+ video_stat.avg_psnr_y = psnr_y.GetMean().value_or(0);
+ video_stat.avg_psnr_u = psnr_u.GetMean().value_or(0);
+ video_stat.avg_psnr_v = psnr_v.GetMean().value_or(0);
+ video_stat.avg_psnr = psnr.GetMean().value_or(0);
+ video_stat.min_psnr =
+ psnr.GetMin().value_or(std::numeric_limits<float>::max());
+ video_stat.avg_ssim = ssim.GetMean().value_or(0);
+ video_stat.min_ssim =
+ ssim.GetMin().value_or(std::numeric_limits<float>::max());
+
+ return video_stat;
+}
+
+void VideoCodecTestStatsImpl::GetNumberOfEncodedLayers(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t* num_encoded_spatial_layers,
+ size_t* num_encoded_temporal_layers) {
+ *num_encoded_spatial_layers = 0;
+ *num_encoded_temporal_layers = 0;
+
+ const size_t num_spatial_layers = layer_stats_.size();
+
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
+ ++frame_num) {
+ for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers;
+ ++spatial_idx) {
+ FrameStatistics* frame_stat = GetFrame(frame_num, spatial_idx);
+ if (frame_stat->encoding_successful) {
+ *num_encoded_spatial_layers =
+ std::max(*num_encoded_spatial_layers, frame_stat->spatial_idx + 1);
+ *num_encoded_temporal_layers = std::max(*num_encoded_temporal_layers,
+ frame_stat->temporal_idx + 1);
+ }
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h
new file mode 100644
index 0000000000..1a7980aa0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "api/test/videocodec_test_stats.h" // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Statistics for a sequence of processed frames. This class is not thread safe.
+class VideoCodecTestStatsImpl : public VideoCodecTestStats {
+ public:
+ VideoCodecTestStatsImpl();
+ ~VideoCodecTestStatsImpl() override;
+
+ // Creates a FrameStatistics for the next frame to be processed.
+ void AddFrame(const FrameStatistics& frame_stat);
+
+ // Returns the FrameStatistics corresponding to `frame_number` or `timestamp`.
+ FrameStatistics* GetFrame(size_t frame_number, size_t spatial_idx);
+ FrameStatistics* GetFrameWithTimestamp(size_t timestamp, size_t spatial_idx);
+
+  // Creates a FrameStatistics entry if one doesn't exist and returns the
+  // created/existing entry.
+ FrameStatistics* GetOrAddFrame(size_t timestamp_rtp, size_t spatial_idx);
+
+ // Implements VideoCodecTestStats.
+ std::vector<FrameStatistics> GetFrameStatistics() const override;
+ std::vector<VideoStatistics> SliceAndCalcLayerVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num) override;
+
+ VideoStatistics SliceAndCalcAggregatedVideoStatistic(size_t first_frame_num,
+ size_t last_frame_num);
+
+ VideoStatistics CalcVideoStatistic(size_t first_frame,
+ size_t last_frame,
+ DataRate target_bitrate,
+ Frequency target_framerate) override;
+
+ size_t Size(size_t spatial_idx);
+
+ void Clear();
+
+ private:
+ VideoCodecTestStats::FrameStatistics AggregateFrameStatistic(
+ size_t frame_num,
+ size_t spatial_idx,
+ bool aggregate_independent_layers);
+
+ size_t CalcLayerTargetBitrateKbps(size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers);
+
+ VideoCodecTestStats::VideoStatistics SliceAndCalcVideoStatistic(
+ size_t first_frame_num,
+ size_t last_frame_num,
+ size_t spatial_idx,
+ size_t temporal_idx,
+ bool aggregate_independent_layers,
+ absl::optional<DataRate> target_bitrate,
+ absl::optional<Frequency> target_framerate);
+
+ void GetNumberOfEncodedLayers(size_t first_frame_num,
+ size_t last_frame_num,
+ size_t* num_encoded_spatial_layers,
+ size_t* num_encoded_temporal_layers);
+
+ // layer_idx -> stats.
+ std::map<size_t, std::vector<FrameStatistics>> layer_stats_;
+ // layer_idx -> rtp_timestamp -> frame_num.
+ std::map<size_t, std::map<size_t, size_t>> rtp_timestamp_to_frame_num_;
+};
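+
+// A minimal usage sketch with hypothetical values. At least two frames are
+// needed so that the input framerate can be derived from the RTP timestamp
+// delta, and frames must be marked as successfully encoded with a non-zero
+// target bitrate before slicing:
+//
+//   VideoCodecTestStatsImpl stats;
+//   for (size_t i = 0; i < 2; ++i) {
+//     stats.AddFrame(FrameStatistics(/*frame_number=*/i,
+//                                    /*rtp_timestamp=*/3000 * i,
+//                                    /*spatial_idx=*/0));
+//     FrameStatistics* frame = stats.GetFrame(i, /*spatial_idx=*/0);
+//     frame->encoding_successful = true;
+//     frame->target_bitrate_kbps = 500;
+//     frame->length_bytes = 2000;
+//   }
+//   // One VideoStatistics per encoded (spatial, temporal) layer pair.
+//   std::vector<VideoStatistics> layer_stats =
+//       stats.SliceAndCalcLayerVideoStatistic(/*first_frame_num=*/0,
+//                                             /*last_frame_num=*/1);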
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc
new file mode 100644
index 0000000000..89e7d2e1c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_stats_impl_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+
+#include <vector>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+using FrameStatistics = VideoCodecTestStatsImpl::FrameStatistics;
+
+namespace {
+
+const size_t kTimestamp = 12345;
+
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Field;
+
+} // namespace
+
+TEST(StatsTest, AddAndGetFrame) {
+ VideoCodecTestStatsImpl stats;
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
+ FrameStatistics* frame_stat = stats.GetFrame(0u, 0);
+ EXPECT_EQ(0u, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp);
+}
+
+TEST(StatsTest, GetOrAddFrame_noFrame_createsNewFrameStat) {
+ VideoCodecTestStatsImpl stats;
+ stats.GetOrAddFrame(kTimestamp, 0);
+ FrameStatistics* frame_stat = stats.GetFrameWithTimestamp(kTimestamp, 0);
+ EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp);
+}
+
+TEST(StatsTest, GetOrAddFrame_frameExists_returnsExistingFrameStat) {
+ VideoCodecTestStatsImpl stats;
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
+ FrameStatistics* frame_stat1 = stats.GetFrameWithTimestamp(kTimestamp, 0);
+ FrameStatistics* frame_stat2 = stats.GetOrAddFrame(kTimestamp, 0);
+ EXPECT_EQ(frame_stat1, frame_stat2);
+}
+
+TEST(StatsTest, AddAndGetFrames) {
+ VideoCodecTestStatsImpl stats;
+ const size_t kNumFrames = 1000;
+ for (size_t i = 0; i < kNumFrames; ++i) {
+ stats.AddFrame(FrameStatistics(i, kTimestamp + i, 0));
+ FrameStatistics* frame_stat = stats.GetFrame(i, 0);
+ EXPECT_EQ(i, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp + i, frame_stat->rtp_timestamp);
+ }
+ EXPECT_EQ(kNumFrames, stats.Size(0));
+ // Get frame.
+ size_t i = 22;
+ FrameStatistics* frame_stat = stats.GetFrameWithTimestamp(kTimestamp + i, 0);
+ EXPECT_EQ(i, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp + i, frame_stat->rtp_timestamp);
+}
+
+TEST(StatsTest, AddFrameLayering) {
+ VideoCodecTestStatsImpl stats;
+ for (size_t i = 0; i < 3; ++i) {
+ stats.AddFrame(FrameStatistics(0, kTimestamp + i, i));
+ FrameStatistics* frame_stat = stats.GetFrame(0u, i);
+ EXPECT_EQ(0u, frame_stat->frame_number);
+ EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp - i);
+ EXPECT_EQ(1u, stats.Size(i));
+ }
+}
+
+TEST(StatsTest, GetFrameStatistics) {
+ VideoCodecTestStatsImpl stats;
+
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
+ stats.AddFrame(FrameStatistics(0, kTimestamp, 1));
+ stats.AddFrame(FrameStatistics(1, kTimestamp + 3000, 0));
+ stats.AddFrame(FrameStatistics(1, kTimestamp + 3000, 1));
+
+ const std::vector<FrameStatistics> frame_stats = stats.GetFrameStatistics();
+
+ auto field_matcher = [](size_t frame_number, size_t spatial_idx) {
+ return AllOf(Field(&FrameStatistics::frame_number, frame_number),
+ Field(&FrameStatistics::spatial_idx, spatial_idx));
+ };
+ EXPECT_THAT(frame_stats, Contains(field_matcher(0, 0)));
+ EXPECT_THAT(frame_stats, Contains(field_matcher(0, 1)));
+ EXPECT_THAT(frame_stats, Contains(field_matcher(1, 0)));
+ EXPECT_THAT(frame_stats, Contains(field_matcher(1, 1)));
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
new file mode 100644
index 0000000000..6df974362f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/test/create_videocodec_test_fixture.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
+#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kForemanNumFrames = 300;
+
+VideoCodecTestFixture::Config CreateConfig() {
+ VideoCodecTestFixture::Config config;
+ config.filename = "foreman_cif";
+ config.filepath = ResourcePath(config.filename, "yuv");
+ config.num_frames = kForemanNumFrames;
+ return config;
+}
+
+std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
+ VideoCodecTestFixture::Config config) {
+ auto decoder_factory = CreateObjCDecoderFactory();
+ auto encoder_factory = CreateObjCEncoderFactory();
+ return CreateVideoCodecTestFixture(config, std::move(decoder_factory),
+ std::move(encoder_factory));
+}
+} // namespace
+
+// TODO(webrtc:9099): Disabled until the issue is fixed.
+// HW codecs don't work on simulators. Only run these tests on device.
+// #if TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR
+// #define MAYBE_TEST TEST
+// #else
+#define MAYBE_TEST(s, name) TEST(s, DISABLED_##name)
+// #endif
+
+// TODO(kthelgason): Use RC Thresholds when the internal bitrateAdjuster is no
+// longer in use.
+MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CBP) {
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{33, 29, 0.9, 0.82}};
+
+ fixture->RunTest(rate_profiles, nullptr, &quality_thresholds, nullptr);
+}
+
+MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CHP) {
+ const auto frame_checker =
+ std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto config = CreateConfig();
+ config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
+ config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ 352, 288);
+ config.encoded_frame_checker = frame_checker.get();
+ auto fixture = CreateTestFixtureWithConfig(config);
+
+ std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
+
+ std::vector<QualityThresholds> quality_thresholds = {{33, 30, 0.91, 0.83}};
+
+ fixture->RunTest(rate_profiles, nullptr, &quality_thresholds, nullptr);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc
new file mode 100644
index 0000000000..13266c40df
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -0,0 +1,722 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videoprocessor.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "api/scoped_refptr.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/time_utils.h"
+#include "test/gtest.h"
+#include "third_party/libyuv/include/libyuv/compare.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kMsToRtpTimestamp = kVideoPayloadTypeFrequency / 1000;
+const int kMaxBufferedInputFrames = 20;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+
+size_t GetMaxNaluSizeBytes(const EncodedImage& encoded_frame,
+ const VideoCodecTestFixture::Config& config) {
+ if (config.codec_settings.codecType != kVideoCodecH264)
+ return 0;
+
+ std::vector<webrtc::H264::NaluIndex> nalu_indices =
+ webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
+
+ RTC_CHECK(!nalu_indices.empty());
+
+ size_t max_size = 0;
+ for (const webrtc::H264::NaluIndex& index : nalu_indices)
+ max_size = std::max(max_size, index.payload_size);
+
+ return max_size;
+}
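+// The maximum NALU size recorded here feeds the bitstream size checks that
+// BitstreamThresholds asserts against (e.g. in the OpenH264 SingleNalUnit
+// test above).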
+
+size_t GetTemporalLayerIndex(const CodecSpecificInfo& codec_specific) {
+ size_t temporal_idx = 0;
+ if (codec_specific.codecType == kVideoCodecVP8) {
+ temporal_idx = codec_specific.codecSpecific.VP8.temporalIdx;
+ } else if (codec_specific.codecType == kVideoCodecVP9) {
+ temporal_idx = codec_specific.codecSpecific.VP9.temporal_idx;
+ }
+ if (temporal_idx == kNoTemporalIdx) {
+ temporal_idx = 0;
+ }
+ return temporal_idx;
+}
+
+int GetElapsedTimeMicroseconds(int64_t start_ns, int64_t stop_ns) {
+ int64_t diff_us = (stop_ns - start_ns) / rtc::kNumNanosecsPerMicrosec;
+ RTC_DCHECK_GE(diff_us, std::numeric_limits<int>::min());
+ RTC_DCHECK_LE(diff_us, std::numeric_limits<int>::max());
+ return static_cast<int>(diff_us);
+}
+
+void CalculateFrameQuality(const I420BufferInterface& ref_buffer,
+ const I420BufferInterface& dec_buffer,
+ VideoCodecTestStats::FrameStatistics* frame_stat,
+ bool calc_ssim) {
+ if (ref_buffer.width() != dec_buffer.width() ||
+ ref_buffer.height() != dec_buffer.height()) {
+ RTC_CHECK_GE(ref_buffer.width(), dec_buffer.width());
+ RTC_CHECK_GE(ref_buffer.height(), dec_buffer.height());
+ // Downscale reference frame.
+ rtc::scoped_refptr<I420Buffer> scaled_buffer =
+ I420Buffer::Create(dec_buffer.width(), dec_buffer.height());
+ I420Scale(ref_buffer.DataY(), ref_buffer.StrideY(), ref_buffer.DataU(),
+ ref_buffer.StrideU(), ref_buffer.DataV(), ref_buffer.StrideV(),
+ ref_buffer.width(), ref_buffer.height(),
+ scaled_buffer->MutableDataY(), scaled_buffer->StrideY(),
+ scaled_buffer->MutableDataU(), scaled_buffer->StrideU(),
+ scaled_buffer->MutableDataV(), scaled_buffer->StrideV(),
+ scaled_buffer->width(), scaled_buffer->height(),
+ libyuv::kFilterBox);
+
+ CalculateFrameQuality(*scaled_buffer, dec_buffer, frame_stat, calc_ssim);
+ } else {
+ const uint64_t sse_y = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataY(), dec_buffer.StrideY(), ref_buffer.DataY(),
+ ref_buffer.StrideY(), dec_buffer.width(), dec_buffer.height());
+
+ const uint64_t sse_u = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataU(), dec_buffer.StrideU(), ref_buffer.DataU(),
+ ref_buffer.StrideU(), dec_buffer.width() / 2, dec_buffer.height() / 2);
+
+ const uint64_t sse_v = libyuv::ComputeSumSquareErrorPlane(
+ dec_buffer.DataV(), dec_buffer.StrideV(), ref_buffer.DataV(),
+ ref_buffer.StrideV(), dec_buffer.width() / 2, dec_buffer.height() / 2);
+
+ const size_t num_y_samples = dec_buffer.width() * dec_buffer.height();
+ const size_t num_u_samples =
+ dec_buffer.width() / 2 * dec_buffer.height() / 2;
+
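+    // Per-plane PSNR from the summed squared error; for 8-bit content this
+    // is 10 * log10(255^2 * num_samples / sse), as computed by
+    // libyuv::SumSquareErrorToPsnr.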
+ frame_stat->psnr_y = libyuv::SumSquareErrorToPsnr(sse_y, num_y_samples);
+ frame_stat->psnr_u = libyuv::SumSquareErrorToPsnr(sse_u, num_u_samples);
+ frame_stat->psnr_v = libyuv::SumSquareErrorToPsnr(sse_v, num_u_samples);
+ frame_stat->psnr = libyuv::SumSquareErrorToPsnr(
+ sse_y + sse_u + sse_v, num_y_samples + 2 * num_u_samples);
+
+ if (calc_ssim) {
+ frame_stat->ssim = I420SSIM(ref_buffer, dec_buffer);
+ }
+ }
+}
+
+} // namespace
+
+VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
+ VideoDecoderList* decoders,
+ FrameReader* input_frame_reader,
+ const VideoCodecTestFixture::Config& config,
+ VideoCodecTestStatsImpl* stats,
+ IvfFileWriterMap* encoded_frame_writers,
+ FrameWriterList* decoded_frame_writers)
+ : config_(config),
+ num_simulcast_or_spatial_layers_(
+ std::max(config_.NumberOfSimulcastStreams(),
+ config_.NumberOfSpatialLayers())),
+ analyze_frame_quality_(!config_.measure_cpu),
+ stats_(stats),
+ encoder_(encoder),
+ decoders_(decoders),
+ bitrate_allocator_(
+ CreateBuiltinVideoBitrateAllocatorFactory()
+ ->CreateVideoBitrateAllocator(config_.codec_settings)),
+ encode_callback_(this),
+ input_frame_reader_(input_frame_reader),
+ merged_encoded_frames_(num_simulcast_or_spatial_layers_),
+ encoded_frame_writers_(encoded_frame_writers),
+ decoded_frame_writers_(decoded_frame_writers),
+ last_inputed_frame_num_(0),
+ last_inputed_timestamp_(0),
+ first_encoded_frame_(num_simulcast_or_spatial_layers_, true),
+ last_encoded_frame_num_(num_simulcast_or_spatial_layers_),
+ first_decoded_frame_(num_simulcast_or_spatial_layers_, true),
+ last_decoded_frame_num_(num_simulcast_or_spatial_layers_),
+ last_decoded_frame_buffer_(num_simulcast_or_spatial_layers_),
+ post_encode_time_ns_(0),
+ is_finalized_(false) {
+ // Sanity checks.
+ RTC_CHECK(TaskQueueBase::Current())
+ << "VideoProcessor must be run on a task queue.";
+ RTC_CHECK(stats_);
+ RTC_CHECK(encoder_);
+ RTC_CHECK(decoders_);
+ RTC_CHECK_EQ(decoders_->size(), num_simulcast_or_spatial_layers_);
+ RTC_CHECK(input_frame_reader_);
+ RTC_CHECK(encoded_frame_writers_);
+ RTC_CHECK(!decoded_frame_writers ||
+ decoded_frame_writers->size() == num_simulcast_or_spatial_layers_);
+
+ // Setup required callbacks for the encoder and decoder and initialize them.
+ RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(&encode_callback_),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ // Initialize codecs so that they are ready to receive frames.
+ RTC_CHECK_EQ(encoder_->InitEncode(
+ &config_.codec_settings,
+ VideoEncoder::Settings(
+ kCapabilities, static_cast<int>(config_.NumberOfCores()),
+ config_.max_payload_size_bytes)),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ decode_callback_.push_back(
+ std::make_unique<VideoProcessorDecodeCompleteCallback>(this, i));
+ VideoDecoder::Settings decoder_settings;
+ decoder_settings.set_max_render_resolution(
+ {config_.codec_settings.width, config_.codec_settings.height});
+ decoder_settings.set_codec_type(config_.codec_settings.codecType);
+ decoder_settings.set_number_of_cores(config_.NumberOfCores());
+ RTC_CHECK(decoders_->at(i)->Configure(decoder_settings));
+ RTC_CHECK_EQ(decoders_->at(i)->RegisterDecodeCompleteCallback(
+ decode_callback_.at(i).get()),
+ WEBRTC_VIDEO_CODEC_OK);
+ }
+}
+
+VideoProcessor::~VideoProcessor() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ if (!is_finalized_) {
+ Finalize();
+ }
+
+ // Explicitly reset codecs, in case they don't do that themselves when they
+ // go out of scope.
+ RTC_CHECK_EQ(encoder_->Release(), WEBRTC_VIDEO_CODEC_OK);
+ encoder_->RegisterEncodeCompleteCallback(nullptr);
+ for (auto& decoder : *decoders_) {
+ RTC_CHECK_EQ(decoder->Release(), WEBRTC_VIDEO_CODEC_OK);
+ decoder->RegisterDecodeCompleteCallback(nullptr);
+ }
+
+ // Sanity check.
+ RTC_CHECK_LE(input_frames_.size(), kMaxBufferedInputFrames);
+}
+
+void VideoProcessor::ProcessFrame() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_finalized_);
+
+ RTC_DCHECK_GT(target_rates_.size(), 0u);
+ RTC_DCHECK_EQ(target_rates_.begin()->first, 0u);
+ RateProfile target_rate =
+ std::prev(target_rates_.upper_bound(last_inputed_frame_num_))->second;
+
+ const size_t frame_number = last_inputed_frame_num_++;
+
+ // Get input frame and store for future quality calculation.
+ Resolution resolution = Resolution({.width = config_.codec_settings.width,
+ .height = config_.codec_settings.height});
+ FrameReader::Ratio framerate_scale = FrameReader::Ratio(
+ {.num = config_.clip_fps.value_or(config_.codec_settings.maxFramerate),
+ .den = static_cast<int>(config_.codec_settings.maxFramerate)});
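+  // framerate_scale lets PullFrame() decimate or repeat source frames when
+  // the clip's native framerate differs from the configured encoding
+  // framerate.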
+ rtc::scoped_refptr<I420BufferInterface> buffer =
+ input_frame_reader_->PullFrame(
+ /*frame_num*/ nullptr, resolution, framerate_scale);
+
+ RTC_CHECK(buffer) << "Tried to read too many frames from the file.";
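+  // Advance the 90 kHz RTP clock by one frame interval, e.g. 3000 ticks per
+  // frame at 30 fps.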
+ const size_t timestamp =
+ last_inputed_timestamp_ +
+ static_cast<size_t>(kVideoPayloadTypeFrequency / target_rate.input_fps);
+ VideoFrame input_frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(static_cast<uint32_t>(timestamp))
+ .set_timestamp_ms(static_cast<int64_t>(timestamp / kMsToRtpTimestamp))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .build();
+ // Store input frame as a reference for quality calculations.
+ if (config_.decode && !config_.measure_cpu) {
+ if (input_frames_.size() == kMaxBufferedInputFrames) {
+ input_frames_.erase(input_frames_.begin());
+ }
+
+ if (config_.reference_width != -1 && config_.reference_height != -1 &&
+ (input_frame.width() != config_.reference_width ||
+ input_frame.height() != config_.reference_height)) {
+ rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
+ config_.codec_settings.width, config_.codec_settings.height);
+ scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
+
+ VideoFrame scaled_reference_frame = input_frame;
+ scaled_reference_frame.set_video_frame_buffer(scaled_buffer);
+ input_frames_.emplace(frame_number, scaled_reference_frame);
+
+ if (config_.reference_width == config_.codec_settings.width &&
+ config_.reference_height == config_.codec_settings.height) {
+        // Both encoding and comparison use the same down-scale factor; reuse
+        // it for the encoder below.
+ input_frame = scaled_reference_frame;
+ }
+ } else {
+ input_frames_.emplace(frame_number, input_frame);
+ }
+ }
+ last_inputed_timestamp_ = timestamp;
+
+ post_encode_time_ns_ = 0;
+
+ // Create frame statistics object for all simulcast/spatial layers.
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ FrameStatistics frame_stat(frame_number, timestamp, i);
+ stats_->AddFrame(frame_stat);
+ }
+
+ // For the highest measurement accuracy of the encode time, the start/stop
+ // time recordings should wrap the Encode call as tightly as possible.
+ const int64_t encode_start_ns = rtc::TimeNanos();
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
+ frame_stat->encode_start_ns = encode_start_ns;
+ }
+
+ if (input_frame.width() != config_.codec_settings.width ||
+ input_frame.height() != config_.codec_settings.height) {
+ rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
+ config_.codec_settings.width, config_.codec_settings.height);
+ scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
+ input_frame.set_video_frame_buffer(scaled_buffer);
+ }
+
+ // Encode.
+ const std::vector<VideoFrameType> frame_types =
+ (frame_number == 0)
+ ? std::vector<VideoFrameType>(num_simulcast_or_spatial_layers_,
+ VideoFrameType::kVideoFrameKey)
+ : std::vector<VideoFrameType>(num_simulcast_or_spatial_layers_,
+ VideoFrameType::kVideoFrameDelta);
+ const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
+ for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
+ FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
+ frame_stat->encode_return_code = encode_return_code;
+ }
+}
+
+void VideoProcessor::SetRates(size_t bitrate_kbps, double framerate_fps) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_finalized_);
+
+ target_rates_[last_inputed_frame_num_] =
+ RateProfile({.target_kbps = bitrate_kbps, .input_fps = framerate_fps});
+
+ auto bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ static_cast<uint32_t>(bitrate_kbps * 1000), framerate_fps));
+ encoder_->SetRates(
+ VideoEncoder::RateControlParameters(bitrate_allocation, framerate_fps));
+}
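+
+// A minimal sketch, not part of the upstream file, of the RTP timestamp
+// arithmetic used by ProcessFrame() above: timestamps advance by
+// kVideoPayloadTypeFrequency (the 90 kHz video clock) divided by the target
+// input fps, e.g. 90000 / 30 = 3000 ticks per frame at 30 fps.
+[[maybe_unused]] static size_t NextRtpTimestamp(size_t last_timestamp,
+                                                double input_fps) {
+  return last_timestamp +
+         static_cast<size_t>(kVideoPayloadTypeFrequency / input_fps);
+}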
+
+int32_t VideoProcessor::VideoProcessorDecodeCompleteCallback::Decoded(
+ VideoFrame& image) {
+ // Post the callback to the right task queue, if needed.
+ if (!task_queue_->IsCurrent()) {
+    // There might be a limited number of output buffers; make a copy to make
+    // sure we don't block the decoder.
+ VideoFrame copy = VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Copy(
+ *image.video_frame_buffer()->ToI420()))
+ .set_rotation(image.rotation())
+ .set_timestamp_us(image.timestamp_us())
+ .set_id(image.id())
+ .build();
+ copy.set_timestamp(image.timestamp());
+
+ task_queue_->PostTask([this, copy]() {
+ video_processor_->FrameDecoded(copy, simulcast_svc_idx_);
+ });
+ return 0;
+ }
+ video_processor_->FrameDecoded(image, simulcast_svc_idx_);
+ return 0;
+}
+
+void VideoProcessor::FrameEncoded(
+ const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo& codec_specific) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // For the highest measurement accuracy of the encode time, the start/stop
+ // time recordings should wrap the Encode call as tightly as possible.
+ const int64_t encode_stop_ns = rtc::TimeNanos();
+
+ const VideoCodecType codec_type = codec_specific.codecType;
+ if (config_.encoded_frame_checker) {
+ config_.encoded_frame_checker->CheckEncodedFrame(codec_type, encoded_image);
+ }
+
+ // Layer metadata.
+ size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
+ size_t temporal_idx = GetTemporalLayerIndex(codec_specific);
+
+ FrameStatistics* frame_stat =
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
+ const size_t frame_number = frame_stat->frame_number;
+
+ // Ensure that the encode order is monotonically increasing, within this
+ // simulcast/spatial layer.
+ RTC_CHECK(first_encoded_frame_[spatial_idx] ||
+ last_encoded_frame_num_[spatial_idx] < frame_number);
+
+ // Ensure SVC spatial layers are delivered in ascending order.
+ const size_t num_spatial_layers = config_.NumberOfSpatialLayers();
+ if (!first_encoded_frame_[spatial_idx] && num_spatial_layers > 1) {
+ for (size_t i = 0; i < spatial_idx; ++i) {
+ RTC_CHECK_LE(last_encoded_frame_num_[i], frame_number);
+ }
+ for (size_t i = spatial_idx + 1; i < num_simulcast_or_spatial_layers_;
+ ++i) {
+ RTC_CHECK_GT(frame_number, last_encoded_frame_num_[i]);
+ }
+ }
+ first_encoded_frame_[spatial_idx] = false;
+ last_encoded_frame_num_[spatial_idx] = frame_number;
+
+ RateProfile target_rate =
+ std::prev(target_rates_.upper_bound(frame_number))->second;
+ auto bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ static_cast<uint32_t>(target_rate.target_kbps * 1000),
+ target_rate.input_fps));
+
+ // Update frame statistics.
+ frame_stat->encoding_successful = true;
+ frame_stat->encode_time_us = GetElapsedTimeMicroseconds(
+ frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
+ frame_stat->target_bitrate_kbps =
+ bitrate_allocation.GetTemporalLayerSum(spatial_idx, temporal_idx) / 1000;
+ frame_stat->target_framerate_fps = target_rate.input_fps;
+ frame_stat->length_bytes = encoded_image.size();
+ frame_stat->frame_type = encoded_image._frameType;
+ frame_stat->temporal_idx = temporal_idx;
+ frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_);
+ frame_stat->qp = encoded_image.qp_;
+
+ if (codec_type == kVideoCodecVP9) {
+ const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9;
+ frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted;
+ frame_stat->non_ref_for_inter_layer_pred =
+ vp9_info.non_ref_for_inter_layer_pred;
+ } else {
+ frame_stat->inter_layer_predicted = false;
+ frame_stat->non_ref_for_inter_layer_pred = true;
+ }
+
+ const webrtc::EncodedImage* encoded_image_for_decode = &encoded_image;
+ if (config_.decode || !encoded_frame_writers_->empty()) {
+ if (num_spatial_layers > 1) {
+ encoded_image_for_decode = BuildAndStoreSuperframe(
+ encoded_image, codec_type, frame_number, spatial_idx,
+ frame_stat->inter_layer_predicted);
+ }
+ }
+
+ if (config_.decode) {
+ DecodeFrame(*encoded_image_for_decode, spatial_idx);
+
+ if (codec_specific.end_of_picture && num_spatial_layers > 1) {
+      // If inter-layer prediction is enabled and an upper layer was dropped,
+      // the base layer should be passed to the upper layer's decoder.
+      // Otherwise the decoder won't be able to decode the next superframe.
+ const EncodedImage* base_image = nullptr;
+ const FrameStatistics* base_stat = nullptr;
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ const bool layer_dropped = (first_decoded_frame_[i] ||
+ last_decoded_frame_num_[i] < frame_number);
+
+ // Ensure current layer was decoded.
+ RTC_CHECK(layer_dropped == false || i != spatial_idx);
+
+ if (!layer_dropped) {
+ base_image = &merged_encoded_frames_[i];
+ base_stat =
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), i);
+ } else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
+ DecodeFrame(*base_image, i);
+ }
+ }
+ }
+ } else {
+ frame_stat->decode_return_code = WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ // Since frames in higher TLs typically depend on frames in lower TLs,
+ // write out frames in lower TLs to bitstream dumps of higher TLs.
+ for (size_t write_temporal_idx = temporal_idx;
+ write_temporal_idx < config_.NumberOfTemporalLayers();
+ ++write_temporal_idx) {
+ const VideoProcessor::LayerKey layer_key(spatial_idx, write_temporal_idx);
+ auto it = encoded_frame_writers_->find(layer_key);
+ if (it != encoded_frame_writers_->cend()) {
+ RTC_CHECK(it->second->WriteFrame(*encoded_image_for_decode,
+ config_.codec_settings.codecType));
+ }
+ }
+
+ if (!config_.encode_in_real_time) {
+    // To get the pure encode time of subsequent layers, measure the time
+    // spent in the encode callback and subtract it from their measured
+    // encode time.
+ post_encode_time_ns_ += rtc::TimeNanos() - encode_stop_ns;
+ }
+}
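+
+// A minimal sketch, not upstream code, of the encode-time correction above:
+// the wall-clock interval for a layer is reduced by the time already spent
+// in encode callbacks for earlier layers of the same superframe.
+[[maybe_unused]] static int64_t PureEncodeTimeNs(int64_t encode_start_ns,
+                                                 int64_t encode_stop_ns,
+                                                 int64_t post_encode_time_ns) {
+  return (encode_stop_ns - post_encode_time_ns) - encode_start_ns;
+}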
+
+void VideoProcessor::CalcFrameQuality(const I420BufferInterface& decoded_frame,
+ FrameStatistics* frame_stat) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ const auto reference_frame = input_frames_.find(frame_stat->frame_number);
+ RTC_CHECK(reference_frame != input_frames_.cend())
+ << "The codecs are either buffering too much, dropping too much, or "
+ "being too slow relative to the input frame rate.";
+
+ // SSIM calculation is not optimized. Skip it in real-time mode.
+ const bool calc_ssim = !config_.encode_in_real_time;
+ CalculateFrameQuality(*reference_frame->second.video_frame_buffer()->ToI420(),
+ decoded_frame, frame_stat, calc_ssim);
+
+ frame_stat->quality_analysis_successful = true;
+}
+
+void VideoProcessor::WriteDecodedFrame(const I420BufferInterface& decoded_frame,
+ FrameWriter& frame_writer) {
+ int input_video_width = config_.codec_settings.width;
+ int input_video_height = config_.codec_settings.height;
+
+ rtc::scoped_refptr<I420Buffer> scaled_buffer;
+ const I420BufferInterface* scaled_frame;
+
+ if (decoded_frame.width() == input_video_width &&
+ decoded_frame.height() == input_video_height) {
+ scaled_frame = &decoded_frame;
+ } else {
+ EXPECT_DOUBLE_EQ(
+ static_cast<double>(input_video_width) / input_video_height,
+ static_cast<double>(decoded_frame.width()) / decoded_frame.height());
+
+ scaled_buffer = I420Buffer::Create(input_video_width, input_video_height);
+ scaled_buffer->ScaleFrom(decoded_frame);
+
+ scaled_frame = scaled_buffer.get();
+ }
+
+ // Ensure there is no padding.
+ RTC_CHECK_EQ(scaled_frame->StrideY(), input_video_width);
+ RTC_CHECK_EQ(scaled_frame->StrideU(), input_video_width / 2);
+ RTC_CHECK_EQ(scaled_frame->StrideV(), input_video_width / 2);
+
+ RTC_CHECK_EQ(3 * input_video_width * input_video_height / 2,
+ frame_writer.FrameLength());
+
+ RTC_CHECK(frame_writer.WriteFrame(scaled_frame->DataY()));
+}
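+
+// A minimal sketch, not upstream code: the single WriteFrame(DataY()) call
+// above is valid because a padding-free I420 buffer stores Y (w * h bytes)
+// followed contiguously by U and V (w/2 * h/2 bytes each), which is the
+// 3 * w * h / 2 total checked against FrameLength() above.
+[[maybe_unused]] static constexpr size_t I420FrameBytes(int width,
+                                                        int height) {
+  return 3 * static_cast<size_t>(width) * height / 2;
+}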
+
+void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame,
+ size_t spatial_idx) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // For the highest measurement accuracy of the decode time, the start/stop
+ // time recordings should wrap the Decode call as tightly as possible.
+ const int64_t decode_stop_ns = rtc::TimeNanos();
+
+ FrameStatistics* frame_stat =
+ stats_->GetFrameWithTimestamp(decoded_frame.timestamp(), spatial_idx);
+ const size_t frame_number = frame_stat->frame_number;
+
+ if (!first_decoded_frame_[spatial_idx]) {
+ for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1;
+ dropped_frame_number < frame_number; ++dropped_frame_number) {
+ FrameStatistics* dropped_frame_stat =
+ stats_->GetFrame(dropped_frame_number, spatial_idx);
+
+ if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) {
+ // Calculate frame quality comparing input frame with last decoded one.
+ CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx],
+ dropped_frame_stat);
+ }
+
+ if (decoded_frame_writers_ != nullptr) {
+        // Fill dropped frames with the last decoded frame so they look like
+        // a freeze at playback and decoded layers stay in sync.
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
+ *decoded_frame_writers_->at(spatial_idx));
+ }
+ }
+ }
+
+ // Ensure that the decode order is monotonically increasing, within this
+ // simulcast/spatial layer.
+ RTC_CHECK(first_decoded_frame_[spatial_idx] ||
+ last_decoded_frame_num_[spatial_idx] < frame_number);
+ first_decoded_frame_[spatial_idx] = false;
+ last_decoded_frame_num_[spatial_idx] = frame_number;
+
+ // Update frame statistics.
+ frame_stat->decoding_successful = true;
+ frame_stat->decode_time_us =
+ GetElapsedTimeMicroseconds(frame_stat->decode_start_ns, decode_stop_ns);
+ frame_stat->decoded_width = decoded_frame.width();
+ frame_stat->decoded_height = decoded_frame.height();
+
+ // Skip quality metrics calculation to not affect CPU usage.
+ if (analyze_frame_quality_ || decoded_frame_writers_) {
+ // Save last decoded frame to handle possible future drops.
+ rtc::scoped_refptr<I420BufferInterface> i420buffer =
+ decoded_frame.video_frame_buffer()->ToI420();
+
+ // Copy decoded frame to a buffer without padding/stride such that we can
+ // dump Y, U and V planes into a file in one shot.
+ last_decoded_frame_buffer_[spatial_idx] = I420Buffer::Copy(
+ i420buffer->width(), i420buffer->height(), i420buffer->DataY(),
+ i420buffer->StrideY(), i420buffer->DataU(), i420buffer->StrideU(),
+ i420buffer->DataV(), i420buffer->StrideV());
+ }
+
+ if (analyze_frame_quality_) {
+ CalcFrameQuality(*decoded_frame.video_frame_buffer()->ToI420(), frame_stat);
+ }
+
+ if (decoded_frame_writers_ != nullptr) {
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
+ *decoded_frame_writers_->at(spatial_idx));
+ }
+
+ // Erase all buffered input frames that we have moved past for all
+ // simulcast/spatial layers. Never buffer more than
+ // `kMaxBufferedInputFrames` frames, to protect against long runs of
+ // consecutive frame drops for a particular layer.
+ const auto min_last_decoded_frame_num = std::min_element(
+ last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend());
+ const size_t min_buffered_frame_num =
+ std::max(0, static_cast<int>(frame_number) - kMaxBufferedInputFrames + 1);
+ RTC_CHECK(min_last_decoded_frame_num != last_decoded_frame_num_.cend());
+ const auto input_frames_erase_before = input_frames_.lower_bound(
+ std::max(*min_last_decoded_frame_num, min_buffered_frame_num));
+ input_frames_.erase(input_frames_.cbegin(), input_frames_erase_before);
+}
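+
+// Worked example for the eviction above, with hypothetical numbers: if two
+// layers have last decoded frame numbers {10, 7}, then
+// *min_last_decoded_frame_num is 7 and all buffered input frames numbered
+// below 7 are released, unless `frame_number - kMaxBufferedInputFrames + 1`
+// is larger, in which case the buffering cap wins.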
+
+void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
+ size_t spatial_idx) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ FrameStatistics* frame_stat =
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
+
+ frame_stat->decode_start_ns = rtc::TimeNanos();
+ frame_stat->decode_return_code =
+ decoders_->at(spatial_idx)->Decode(encoded_image, false, 0);
+}
+
+const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
+ const EncodedImage& encoded_image,
+ const VideoCodecType codec,
+ size_t frame_number,
+ size_t spatial_idx,
+ bool inter_layer_predicted) {
+ // Should only be called for SVC.
+ RTC_CHECK_GT(config_.NumberOfSpatialLayers(), 1);
+
+ EncodedImage base_image;
+ RTC_CHECK_EQ(base_image.size(), 0);
+
+  // Each SVC layer is decoded with a dedicated decoder. Find the nearest
+  // non-dropped base frame and merge it with the current frame into a
+  // superframe.
+ if (inter_layer_predicted) {
+ for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
+ --base_idx) {
+ EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
+ if (lower_layer.Timestamp() == encoded_image.Timestamp()) {
+ base_image = lower_layer;
+ break;
+ }
+ }
+ }
+ const size_t payload_size_bytes = base_image.size() + encoded_image.size();
+
+ auto buffer = EncodedImageBuffer::Create(payload_size_bytes);
+ if (base_image.size()) {
+ RTC_CHECK(base_image.data());
+ memcpy(buffer->data(), base_image.data(), base_image.size());
+ }
+ memcpy(buffer->data() + base_image.size(), encoded_image.data(),
+ encoded_image.size());
+
+ EncodedImage copied_image = encoded_image;
+ copied_image.SetEncodedData(buffer);
+ if (base_image.size())
+ copied_image._frameType = base_image._frameType;
+
+ // Replace previous EncodedImage for this spatial layer.
+ merged_encoded_frames_.at(spatial_idx) = std::move(copied_image);
+
+ return &merged_encoded_frames_.at(spatial_idx);
+}
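+
+// A note on the layout produced above (illustrative, not upstream text): the
+// superframe is simply the base layer's payload followed by the current
+// layer's payload, so the current layer's data starts at offset
+// base_image.size() within the merged buffer.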
+
+void VideoProcessor::Finalize() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!is_finalized_);
+ is_finalized_ = true;
+
+ if (!(analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) &&
+ decoded_frame_writers_ == nullptr) {
+ return;
+ }
+
+ for (size_t spatial_idx = 0; spatial_idx < num_simulcast_or_spatial_layers_;
+ ++spatial_idx) {
+ if (first_decoded_frame_[spatial_idx]) {
+ continue; // No decoded frames on this spatial layer.
+ }
+
+ for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1;
+ dropped_frame_number < last_inputed_frame_num_;
+ ++dropped_frame_number) {
+ FrameStatistics* frame_stat =
+ stats_->GetFrame(dropped_frame_number, spatial_idx);
+
+ RTC_DCHECK(!frame_stat->decoding_successful);
+
+ if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) {
+ CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx], frame_stat);
+ }
+
+ if (decoded_frame_writers_ != nullptr) {
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
+ *decoded_frame_writers_->at(spatial_idx));
+ }
+ }
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h
new file mode 100644
index 0000000000..0a5fdf8622
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
+#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/resolution.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/include/module_common_types.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "test/testsupport/frame_reader.h"
+#include "test/testsupport/frame_writer.h"
+
+namespace webrtc {
+namespace test {
+
+// Handles encoding/decoding of video using the VideoEncoder/VideoDecoder
+// interfaces. This is done in a sequential manner in order to be able to
+// measure times properly.
+// The class processes one frame at a time from the configured input file.
+// It keeps track of how far into the source input file processing has gone.
+class VideoProcessor {
+ public:
+ using VideoDecoderList = std::vector<std::unique_ptr<VideoDecoder>>;
+ using LayerKey = std::pair<int /* spatial_idx */, int /* temporal_idx */>;
+ using IvfFileWriterMap = std::map<LayerKey, std::unique_ptr<IvfFileWriter>>;
+ // TODO(brandtr): Consider changing FrameWriterList to be a FrameWriterMap,
+ // to be able to save different TLs separately.
+ using FrameWriterList = std::vector<std::unique_ptr<FrameWriter>>;
+ using FrameStatistics = VideoCodecTestStats::FrameStatistics;
+
+ VideoProcessor(webrtc::VideoEncoder* encoder,
+ VideoDecoderList* decoders,
+ FrameReader* input_frame_reader,
+ const VideoCodecTestFixture::Config& config,
+ VideoCodecTestStatsImpl* stats,
+ IvfFileWriterMap* encoded_frame_writers,
+ FrameWriterList* decoded_frame_writers);
+ ~VideoProcessor();
+
+ VideoProcessor(const VideoProcessor&) = delete;
+ VideoProcessor& operator=(const VideoProcessor&) = delete;
+
+ // Reads a frame and sends it to the encoder. When the encode callback
+  // is received, the encoded frame is buffered. After encoding is finished,
+  // the buffered frame is sent to the decoder. Quality evaluation is done in
+ // the decode callback.
+ void ProcessFrame();
+
+ // Updates the encoder with target rates. Must be called at least once.
+ void SetRates(size_t bitrate_kbps, double framerate_fps);
+
+  // Signals the processor to finalize frame processing and handle possible
+  // tail drops. If not called explicitly, this will be called in the dtor.
+  // ProcessFrame() and SetRates() must not be called after Finalize().
+ void Finalize();
+
+ private:
+ class VideoProcessorEncodeCompleteCallback
+ : public webrtc::EncodedImageCallback {
+ public:
+ explicit VideoProcessorEncodeCompleteCallback(
+ VideoProcessor* video_processor)
+ : video_processor_(video_processor),
+ task_queue_(TaskQueueBase::Current()) {
+ RTC_DCHECK(video_processor_);
+ RTC_DCHECK(task_queue_);
+ }
+
+ Result OnEncodedImage(
+ const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo* codec_specific_info) override {
+ RTC_CHECK(codec_specific_info);
+
+ // Post the callback to the right task queue, if needed.
+ if (!task_queue_->IsCurrent()) {
+ VideoProcessor* video_processor = video_processor_;
+ task_queue_->PostTask([video_processor, encoded_image,
+ codec_specific_info = *codec_specific_info] {
+ video_processor->FrameEncoded(encoded_image, codec_specific_info);
+ });
+ return Result(Result::OK, 0);
+ }
+
+ video_processor_->FrameEncoded(encoded_image, *codec_specific_info);
+ return Result(Result::OK, 0);
+ }
+
+ private:
+ VideoProcessor* const video_processor_;
+ TaskQueueBase* const task_queue_;
+ };
+
+ class VideoProcessorDecodeCompleteCallback
+ : public webrtc::DecodedImageCallback {
+ public:
+ explicit VideoProcessorDecodeCompleteCallback(
+ VideoProcessor* video_processor,
+ size_t simulcast_svc_idx)
+ : video_processor_(video_processor),
+ simulcast_svc_idx_(simulcast_svc_idx),
+ task_queue_(TaskQueueBase::Current()) {
+ RTC_DCHECK(video_processor_);
+ RTC_DCHECK(task_queue_);
+ }
+
+ int32_t Decoded(webrtc::VideoFrame& image) override;
+
+ int32_t Decoded(webrtc::VideoFrame& image,
+ int64_t decode_time_ms) override {
+ return Decoded(image);
+ }
+
+ void Decoded(webrtc::VideoFrame& image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ Decoded(image);
+ }
+
+ private:
+ VideoProcessor* const video_processor_;
+ const size_t simulcast_svc_idx_;
+ TaskQueueBase* const task_queue_;
+ };
+
+ // Invoked by the callback adapter when a frame has completed encoding.
+ void FrameEncoded(const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo& codec_specific);
+
+ // Invoked by the callback adapter when a frame has completed decoding.
+ void FrameDecoded(const webrtc::VideoFrame& image, size_t simulcast_svc_idx);
+
+ void DecodeFrame(const EncodedImage& encoded_image, size_t simulcast_svc_idx);
+
+ // In order to supply the SVC decoders with super frames containing all
+ // lower layer frames, we merge and store the layer frames in this method.
+ const webrtc::EncodedImage* BuildAndStoreSuperframe(
+ const EncodedImage& encoded_image,
+ VideoCodecType codec,
+ size_t frame_number,
+ size_t simulcast_svc_idx,
+ bool inter_layer_predicted) RTC_RUN_ON(sequence_checker_);
+
+ void CalcFrameQuality(const I420BufferInterface& decoded_frame,
+ FrameStatistics* frame_stat);
+
+ void WriteDecodedFrame(const I420BufferInterface& decoded_frame,
+ FrameWriter& frame_writer);
+
+ void HandleTailDrops();
+
+ // Test config.
+ const VideoCodecTestFixture::Config config_;
+ const size_t num_simulcast_or_spatial_layers_;
+ const bool analyze_frame_quality_;
+
+ // Frame statistics.
+ VideoCodecTestStatsImpl* const stats_;
+
+ // Codecs.
+ webrtc::VideoEncoder* const encoder_;
+ VideoDecoderList* const decoders_;
+ const std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;
+
+ // Target bitrate and framerate per frame.
+ std::map<size_t, RateProfile> target_rates_ RTC_GUARDED_BY(sequence_checker_);
+
+ // Adapters for the codec callbacks.
+ VideoProcessorEncodeCompleteCallback encode_callback_;
+  // Assign a separate callback object to each decoder. This allows us to
+  // identify the decoded layer in the frame decode callback.
+ // simulcast_svc_idx -> decode callback.
+ std::vector<std::unique_ptr<VideoProcessorDecodeCompleteCallback>>
+ decode_callback_;
+
+ // Each call to ProcessFrame() will read one frame from `input_frame_reader_`.
+ FrameReader* const input_frame_reader_;
+
+  // Input frames are used as references for frame quality evaluations.
+  // Async codecs might queue frames. To handle that, we keep each input frame
+  // and release it after the corresponding coded frame has been decoded and
+  // the quality measurement is done.
+ // frame_number -> frame.
+ std::map<size_t, VideoFrame> input_frames_ RTC_GUARDED_BY(sequence_checker_);
+
+  // The encoder delivers coded frames layer by layer. We store the coded
+  // frames and, after all layers are encoded, decode them. Separating frame
+  // processing at the superframe level simplifies encode/decode time
+  // measurement.
+ // simulcast_svc_idx -> merged SVC encoded frame.
+ std::vector<EncodedImage> merged_encoded_frames_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // These (optional) file writers are used to persistently store the encoded
+ // and decoded bitstreams. Each frame writer is enabled by being non-null.
+ IvfFileWriterMap* const encoded_frame_writers_;
+ FrameWriterList* const decoded_frame_writers_;
+
+  // Metadata for inputted/encoded/decoded frames. Used for frame
+  // identification, frame drop detection, etc. We assume that encoded/decoded
+  // frames are ordered within each simulcast/spatial layer, but we make no
+  // assumptions about frame ordering between layers.
+ size_t last_inputed_frame_num_ RTC_GUARDED_BY(sequence_checker_);
+ size_t last_inputed_timestamp_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> encode status.
+ std::vector<bool> first_encoded_frame_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> frame_number.
+ std::vector<size_t> last_encoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> decode status.
+ std::vector<bool> first_decoded_frame_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> frame_number.
+ std::vector<size_t> last_decoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
+ // simulcast_svc_idx -> buffer.
+ std::vector<rtc::scoped_refptr<I420Buffer>> last_decoded_frame_buffer_
+ RTC_GUARDED_BY(sequence_checker_);
+
+  // Time spent in the frame encode callback. It is accumulated over layers
+  // and reset when frame encoding starts. When the next layer is encoded, the
+  // post-encode time is subtracted from the measured encode time. Thus we get
+  // the pure encode time.
+ int64_t post_encode_time_ns_ RTC_GUARDED_BY(sequence_checker_);
+
+ // Indicates whether Finalize() was called or not.
+ bool is_finalized_ RTC_GUARDED_BY(sequence_checker_);
+
+ // This class must be operated on a TaskQueue.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
+
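+// Typical call sequence, sketched from videoprocessor_unittest.cc (all calls
+// must be made on the same task queue):
+//
+//   VideoProcessor processor(&encoder, &decoders, &frame_reader, config,
+//                            &stats, &encoded_frame_writers,
+//                            /*decoded_frame_writers=*/nullptr);
+//   processor.SetRates(/*bitrate_kbps=*/500, /*framerate_fps=*/30.0);
+//   processor.ProcessFrame();  // Once per input frame.
+//   processor.Finalize();      // Optional; otherwise called from the dtor.
+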
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
new file mode 100644
index 0000000000..f1774af5df
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/test/videoprocessor.h"
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/test/videocodec_test_fixture.h"
+#include "api/video/i420_buffer.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/mock/mock_frame_reader.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Property;
+using ::testing::ResultOf;
+using ::testing::Return;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const int kWidth = 352;
+const int kHeight = 288;
+
+} // namespace
+
+class VideoProcessorTest : public ::testing::Test {
+ protected:
+ VideoProcessorTest() : q_("VP queue") {
+ config_.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false,
+ false, kWidth, kHeight);
+
+ decoder_mock_ = new MockVideoDecoder();
+ decoders_.push_back(std::unique_ptr<VideoDecoder>(decoder_mock_));
+
+ ExpectInit();
+ q_.SendTask(
+ [this] {
+ video_processor_ = std::make_unique<VideoProcessor>(
+ &encoder_mock_, &decoders_, &frame_reader_mock_, config_, &stats_,
+ &encoded_frame_writers_, /*decoded_frame_writers=*/nullptr);
+ });
+ }
+
+ ~VideoProcessorTest() {
+ q_.SendTask([this] { video_processor_.reset(); });
+ }
+
+ void ExpectInit() {
+ EXPECT_CALL(encoder_mock_, InitEncode(_, _));
+ EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback);
+ EXPECT_CALL(*decoder_mock_, Configure);
+ EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback);
+ }
+
+ void ExpectRelease() {
+ EXPECT_CALL(encoder_mock_, Release()).Times(1);
+ EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_)).Times(1);
+ EXPECT_CALL(*decoder_mock_, Release()).Times(1);
+ EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback(_)).Times(1);
+ }
+
+ TaskQueueForTest q_;
+
+ VideoCodecTestFixture::Config config_;
+
+ MockVideoEncoder encoder_mock_;
+ MockVideoDecoder* decoder_mock_;
+ std::vector<std::unique_ptr<VideoDecoder>> decoders_;
+ MockFrameReader frame_reader_mock_;
+ VideoCodecTestStatsImpl stats_;
+ VideoProcessor::IvfFileWriterMap encoded_frame_writers_;
+ std::unique_ptr<VideoProcessor> video_processor_;
+};
+
+TEST_F(VideoProcessorTest, InitRelease) {
+ ExpectRelease();
+}
+
+TEST_F(VideoProcessorTest, ProcessFrames_FixedFramerate) {
+ const int kBitrateKbps = 456;
+ const int kFramerateFps = 31;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kFramerateFps))))
+ .Times(1);
+ q_.SendTask([=] { video_processor_->SetRates(kBitrateKbps, kFramerateFps); });
+
+ EXPECT_CALL(frame_reader_mock_, PullFrame(_, _, _))
+ .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
+ EXPECT_CALL(
+ encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp, 1 * 90000 / kFramerateFps), _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ EXPECT_CALL(
+ encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp, 2 * 90000 / kFramerateFps), _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ ExpectRelease();
+}
+
+TEST_F(VideoProcessorTest, ProcessFrames_VariableFramerate) {
+ const int kBitrateKbps = 456;
+ const int kStartFramerateFps = 27;
+ const int kStartTimestamp = 90000 / kStartFramerateFps;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kStartFramerateFps))))
+ .Times(1);
+ q_.SendTask(
+ [=] { video_processor_->SetRates(kBitrateKbps, kStartFramerateFps); });
+
+ EXPECT_CALL(frame_reader_mock_, PullFrame(_, _, _))
+ .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
+ EXPECT_CALL(encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp, kStartTimestamp), _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ const int kNewFramerateFps = 13;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kNewFramerateFps))))
+ .Times(1);
+ q_.SendTask(
+ [=] { video_processor_->SetRates(kBitrateKbps, kNewFramerateFps); });
+
+ EXPECT_CALL(encoder_mock_,
+ Encode(Property(&VideoFrame::timestamp,
+ kStartTimestamp + 90000 / kNewFramerateFps),
+ _))
+ .Times(1);
+ q_.SendTask([this] { video_processor_->ProcessFrame(); });
+
+ ExpectRelease();
+}
+
+TEST_F(VideoProcessorTest, SetRates) {
+ const uint32_t kBitrateKbps = 123;
+ const int kFramerateFps = 17;
+
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(AllOf(ResultOf(
+ [](const VideoEncoder::RateControlParameters& params) {
+ return params.bitrate.get_sum_kbps();
+ },
+ kBitrateKbps),
+ Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kFramerateFps)))))
+ .Times(1);
+ q_.SendTask([=] { video_processor_->SetRates(kBitrateKbps, kFramerateFps); });
+
+ const uint32_t kNewBitrateKbps = 456;
+ const int kNewFramerateFps = 34;
+ EXPECT_CALL(
+ encoder_mock_,
+ SetRates(AllOf(ResultOf(
+ [](const VideoEncoder::RateControlParameters& params) {
+ return params.bitrate.get_sum_kbps();
+ },
+ kNewBitrateKbps),
+ Field(&VideoEncoder::RateControlParameters::framerate_fps,
+ static_cast<double>(kNewFramerateFps)))))
+ .Times(1);
+ q_.SendTask(
+ [=] { video_processor_->SetRates(kNewBitrateKbps, kNewFramerateFps); });
+
+ ExpectRelease();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
new file mode 100644
index 0000000000..94860da1b6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -0,0 +1,884 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+DefaultTemporalLayers::PendingFrame::PendingFrame() = default;
+DefaultTemporalLayers::PendingFrame::PendingFrame(
+ uint32_t timestamp,
+ bool expired,
+ uint8_t updated_buffers_mask,
+ const DependencyInfo& dependency_info)
+ : timestamp(timestamp),
+ expired(expired),
+ updated_buffer_mask(updated_buffers_mask),
+ dependency_info(dependency_info) {}
+
+namespace {
+using BufferFlags = Vp8FrameConfig::BufferFlags;
+using FreezeEntropy = Vp8FrameConfig::FreezeEntropy;
+using Vp8BufferReference = Vp8FrameConfig::Vp8BufferReference;
+
+constexpr BufferFlags kNone = BufferFlags::kNone;
+constexpr BufferFlags kReference = BufferFlags::kReference;
+constexpr BufferFlags kUpdate = BufferFlags::kUpdate;
+constexpr BufferFlags kReferenceAndUpdate = BufferFlags::kReferenceAndUpdate;
+constexpr FreezeEntropy kFreezeEntropy = FreezeEntropy::kFreezeEntropy;
+
+static constexpr uint8_t kUninitializedPatternIndex =
+ std::numeric_limits<uint8_t>::max();
+static constexpr std::array<Vp8BufferReference, 3> kAllBuffers = {
+ {Vp8BufferReference::kLast, Vp8BufferReference::kGolden,
+ Vp8BufferReference::kAltref}};
+
+std::vector<unsigned int> GetTemporalIds(size_t num_layers) {
+ switch (num_layers) {
+ case 1:
+ // Temporal layer structure (single layer):
+ // 0 0 0 0 ...
+ return {0};
+ case 2:
+ // Temporal layer structure:
+ // 1 1 ...
+ // 0 0 ...
+ return {0, 1};
+ case 3:
+ // Temporal layer structure:
+ // 2 2 2 2 ...
+ // 1 1 ...
+ // 0 0 ...
+ return {0, 2, 1, 2};
+ case 4:
+ // Temporal layer structure:
+ // 3 3 3 3 3 3 3 3 ...
+ // 2 2 2 2 ...
+ // 1 1 ...
+ // 0 0 ...
+ return {0, 3, 2, 3, 1, 3, 2, 3};
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return {0};
+}
+
+uint8_t GetUpdatedBuffers(const Vp8FrameConfig& config) {
+ uint8_t flags = 0;
+ if (config.last_buffer_flags & BufferFlags::kUpdate) {
+ flags |= static_cast<uint8_t>(Vp8BufferReference::kLast);
+ }
+ if (config.golden_buffer_flags & BufferFlags::kUpdate) {
+ flags |= static_cast<uint8_t>(Vp8BufferReference::kGolden);
+ }
+ if (config.arf_buffer_flags & BufferFlags::kUpdate) {
+ flags |= static_cast<uint8_t>(Vp8BufferReference::kAltref);
+ }
+ return flags;
+}
+
+size_t BufferToIndex(Vp8BufferReference buffer) {
+ switch (buffer) {
+ case Vp8FrameConfig::Vp8BufferReference::kLast:
+ return 0;
+ case Vp8FrameConfig::Vp8BufferReference::kGolden:
+ return 1;
+ case Vp8FrameConfig::Vp8BufferReference::kAltref:
+ return 2;
+ case Vp8FrameConfig::Vp8BufferReference::kNone:
+ RTC_CHECK_NOTREACHED();
+ }
+}
+
+} // namespace
+
+constexpr size_t DefaultTemporalLayers::kNumReferenceBuffers;
+
+std::vector<DefaultTemporalLayers::DependencyInfo>
+DefaultTemporalLayers::GetDependencyInfo(size_t num_layers) {
+ // For indexing in the patterns described below (which temporal layers they
+ // belong to), see the diagram above.
+ // Layer sync is done similarly for all patterns (except single stream) and
+ // happens every 8 frames:
+  // TL1 layer syncs periodically by only referencing TL0 ('last'), but still
+ // updating 'golden', so it can be used as a reference by future TL1 frames.
+ // TL2 layer syncs just before TL1 by only depending on TL0 (and not depending
+ // on TL1's buffer before TL1 has layer synced).
+ // TODO(pbos): Consider cyclically updating 'arf' (and 'golden' for 1TL) for
+ // the base layer in 1-3TL instead of 'last' periodically on long intervals,
+ // so that if scene changes occur (user walks between rooms or rotates webcam)
+ // the 'arf' (or 'golden' respectively) is not stuck on a no-longer relevant
+ // keyframe.
+
+ switch (num_layers) {
+ case 1:
+ // Always reference and update the same buffer.
+ return {{"S", {kReferenceAndUpdate, kNone, kNone}}};
+ case 2:
+ // All layers can reference but not update the 'alt' buffer, this means
+ // that the 'alt' buffer reference is effectively the last keyframe.
+ // TL0 also references and updates the 'last' buffer.
+ // TL1 also references 'last' and references and updates 'golden'.
+ if (!field_trial::IsDisabled("WebRTC-UseShortVP8TL2Pattern")) {
+ // Shortened 4-frame pattern:
+ // 1---1 1---1 ...
+ // / / / /
+ // 0---0---0---0 ...
+ return {{"SS", {kReferenceAndUpdate, kNone, kNone}},
+ {"-S", {kReference, kUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-D", {kReference, kReference, kNone, kFreezeEntropy}}};
+ } else {
+ // "Default" 8-frame pattern:
+ // 1---1---1---1 1---1---1---1 ...
+ // / / / / / / / /
+ // 0---0---0---0---0---0---0---0 ...
+ return {{"SS", {kReferenceAndUpdate, kNone, kNone}},
+ {"-S", {kReference, kUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-R", {kReference, kReferenceAndUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-R", {kReference, kReferenceAndUpdate, kNone}},
+ {"SR", {kReferenceAndUpdate, kNone, kNone}},
+ {"-D", {kReference, kReference, kNone, kFreezeEntropy}}};
+ }
+ case 3:
+ if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) {
+ // This field trial is intended to check if it is worth using a shorter
+ // temporal pattern, trading some coding efficiency for less risk of
+ // dropped frames.
+ // The coding efficiency will decrease somewhat since the higher layer
+ // state is more volatile, but it will be offset slightly by updating
+ // the altref buffer with TL2 frames, instead of just referencing lower
+ // layers.
+ // If a frame is dropped in a higher layer, the jitter
+ // buffer on the receive side won't be able to decode any higher layer
+ // frame until the next sync frame. So we expect a noticeable decrease
+ // in frame drops on links with high packet loss.
+
+ // TL0 references and updates the 'last' buffer.
+ // TL1 references 'last' and references and updates 'golden'.
+ // TL2 references both 'last' & 'golden' and references and updates
+ // 'arf'.
+ // 2-------2 2-------2 2
+ // / __/ / __/ /
+ // / __1 / __1 /
+ // /___/ /___/ /
+ // 0---------------0---------------0-----
+ // 0 1 2 3 4 5 6 7 8 9 ...
+ return {{"SSS", {kReferenceAndUpdate, kNone, kNone}},
+ {"--S", {kReference, kNone, kUpdate}},
+ {"-DR", {kReference, kUpdate, kNone}},
+ {"--D", {kReference, kReference, kReference, kFreezeEntropy}}};
+ } else {
+ // All layers can reference but not update the 'alt' buffer, this means
+ // that the 'alt' buffer reference is effectively the last keyframe.
+ // TL0 also references and updates the 'last' buffer.
+ // TL1 also references 'last' and references and updates 'golden'.
+ // TL2 references both 'last' and 'golden' but updates no buffer.
+ // 2 __2 _____2 __2 2
+ // / /____/ / / /
+ // / 1---------/-----1 /
+ // /_____/ /_____/ /
+ // 0---------------0---------------0-----
+ // 0 1 2 3 4 5 6 7 8 9 ...
+ return {{"SSS", {kReferenceAndUpdate, kNone, kNone}},
+ {"--D", {kReference, kNone, kNone, kFreezeEntropy}},
+ {"-SS", {kReference, kUpdate, kNone}},
+ {"--D", {kReference, kReference, kNone, kFreezeEntropy}},
+ {"SRR", {kReferenceAndUpdate, kNone, kNone}},
+ {"--D", {kReference, kReference, kNone, kFreezeEntropy}},
+ {"-DS", {kReference, kReferenceAndUpdate, kNone}},
+ {"--D", {kReference, kReference, kNone, kFreezeEntropy}}};
+ }
+ case 4:
+ // TL0 references and updates only the 'last' buffer.
+ // TL1 references 'last' and updates and references 'golden'.
+ // TL2 references 'last' and 'golden', and references and updates 'arf'.
+ // TL3 references all buffers but update none of them.
+ // TODO(philipel): Set decode target information for this structure.
+ return {{"----", {kReferenceAndUpdate, kNone, kNone}},
+ {"----", {kReference, kNone, kNone, kFreezeEntropy}},
+ {"----", {kReference, kNone, kUpdate}},
+ {"----", {kReference, kNone, kReference, kFreezeEntropy}},
+ {"----", {kReference, kUpdate, kNone}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReference, kReferenceAndUpdate}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReferenceAndUpdate, kNone, kNone}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReference, kReferenceAndUpdate}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReferenceAndUpdate, kNone}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}},
+ {"----", {kReference, kReference, kReferenceAndUpdate}},
+ {"----", {kReference, kReference, kReference, kFreezeEntropy}}};
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return {{"", {kNone, kNone, kNone}}};
+}
+
+std::bitset<DefaultTemporalLayers::kNumReferenceBuffers>
+DefaultTemporalLayers::DetermineStaticBuffers(
+ const std::vector<DependencyInfo>& temporal_pattern) {
+ std::bitset<kNumReferenceBuffers> buffers;
+ buffers.set();
+ for (const DependencyInfo& info : temporal_pattern) {
+ uint8_t updated_buffers = GetUpdatedBuffers(info.frame_config);
+
+ for (Vp8BufferReference buffer : kAllBuffers) {
+ if (static_cast<uint8_t>(buffer) & updated_buffers) {
+ buffers.reset(BufferToIndex(buffer));
+ }
+ }
+ }
+ return buffers;
+}
+
+DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers)
+ : num_layers_(std::max(1, number_of_temporal_layers)),
+ temporal_ids_(GetTemporalIds(num_layers_)),
+ temporal_pattern_(GetDependencyInfo(num_layers_)),
+ is_static_buffer_(DetermineStaticBuffers(temporal_pattern_)),
+ pattern_idx_(kUninitializedPatternIndex),
+ new_bitrates_bps_(std::vector<uint32_t>(num_layers_, 0u)) {
+ RTC_CHECK_GE(kMaxTemporalStreams, number_of_temporal_layers);
+ RTC_CHECK_GE(number_of_temporal_layers, 0);
+ RTC_CHECK_LE(number_of_temporal_layers, 4);
+  // pattern_idx_ wraps around temporal_pattern_.size(); this is incorrect if
+  // temporal_ids_ is ever longer. If that ever changes, it needs to
+ // wrap at max(temporal_ids_.size(), temporal_pattern_.size()).
+ RTC_DCHECK_LE(temporal_ids_.size(), temporal_pattern_.size());
+
+ RTC_DCHECK(
+ checker_ = TemporalLayersChecker::CreateTemporalLayersChecker(
+ Vp8TemporalLayersType::kFixedPattern, number_of_temporal_layers));
+
+ // Always need to start with a keyframe, so pre-populate all frame counters.
+ frames_since_buffer_refresh_.fill(0);
+}
+
+DefaultTemporalLayers::~DefaultTemporalLayers() = default;
+
+void DefaultTemporalLayers::SetQpLimits(size_t stream_index,
+ int min_qp,
+ int max_qp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ // Ignore.
+}
+
+size_t DefaultTemporalLayers::StreamCount() const {
+ return 1;
+}
+
+bool DefaultTemporalLayers::SupportsEncoderFrameDropping(
+ size_t stream_index) const {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+  // This class allows the encoder to drop frames as it sees fit.
+ return true;
+}
+
+void DefaultTemporalLayers::OnRatesUpdated(
+ size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(bitrates_bps.size(), 0);
+ RTC_DCHECK_LE(bitrates_bps.size(), num_layers_);
+  // `bitrates_bps` contains the individual rate per layer, but
+  // Vp8EncoderConfig wants the accumulated rates, so sum them up.
+ new_bitrates_bps_ = bitrates_bps;
+ new_bitrates_bps_->resize(num_layers_);
+ for (size_t i = 1; i < num_layers_; ++i) {
+ (*new_bitrates_bps_)[i] += (*new_bitrates_bps_)[i - 1];
+ }
+}
+
+Vp8EncoderConfig DefaultTemporalLayers::UpdateConfiguration(
+ size_t stream_index) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+
+ Vp8EncoderConfig config;
+
+ if (!new_bitrates_bps_) {
+ return config;
+ }
+
+ config.temporal_layer_config.emplace();
+ Vp8EncoderConfig::TemporalLayerConfig& ts_config =
+ config.temporal_layer_config.value();
+
+ for (size_t i = 0; i < num_layers_; ++i) {
+ ts_config.ts_target_bitrate[i] = (*new_bitrates_bps_)[i] / 1000;
+ // ..., 4, 2, 1
+ ts_config.ts_rate_decimator[i] = 1 << (num_layers_ - i - 1);
+ }
+
+ ts_config.ts_number_layers = num_layers_;
+ ts_config.ts_periodicity = temporal_ids_.size();
+ std::copy(temporal_ids_.begin(), temporal_ids_.end(),
+ ts_config.ts_layer_id.begin());
+
+ new_bitrates_bps_.reset();
+
+ return config;
+}
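+
+// Worked example, with hypothetical rates, for OnRatesUpdated() and
+// UpdateConfiguration() above: three layers with per-layer rates
+// {400, 300, 300} kbps are accumulated to ts_target_bitrate = {400, 700,
+// 1000} kbps, paired with ts_rate_decimator = {4, 2, 1}; TL0 thus runs at a
+// quarter of the full framerate and the top layer at the full framerate.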
+
+bool DefaultTemporalLayers::IsSyncFrame(const Vp8FrameConfig& config) const {
+ // Since we always assign TL0 to 'last' in these patterns, we can infer layer
+ // sync by checking if temporal id > 0 and we only reference TL0 or buffers
+ // containing the last key-frame.
+ if (config.packetizer_temporal_idx == 0) {
+ // TL0 frames are per definition not sync frames.
+ return false;
+ }
+
+ if ((config.last_buffer_flags & BufferFlags::kReference) == 0) {
+ // Sync frames must reference TL0.
+ return false;
+ }
+
+ if ((config.golden_buffer_flags & BufferFlags::kReference) &&
+ !is_static_buffer_[BufferToIndex(Vp8BufferReference::kGolden)]) {
+ // Referencing a golden frame that contains a non-(base layer|key frame).
+ return false;
+ }
+ if ((config.arf_buffer_flags & BufferFlags::kReference) &&
+ !is_static_buffer_[BufferToIndex(Vp8BufferReference::kAltref)]) {
+ // Referencing an altref frame that contains a non-(base layer|key frame).
+ return false;
+ }
+
+ return true;
+}
+
+Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index,
+ uint32_t timestamp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(num_layers_, 0);
+ RTC_DCHECK_GT(temporal_pattern_.size(), 0);
+
+ RTC_DCHECK_GT(kUninitializedPatternIndex, temporal_pattern_.size());
+ const bool first_frame = (pattern_idx_ == kUninitializedPatternIndex);
+
+ pattern_idx_ = (pattern_idx_ + 1) % temporal_pattern_.size();
+ DependencyInfo dependency_info = temporal_pattern_[pattern_idx_];
+ Vp8FrameConfig& tl_config = dependency_info.frame_config;
+ tl_config.encoder_layer_id = tl_config.packetizer_temporal_idx =
+ temporal_ids_[pattern_idx_ % temporal_ids_.size()];
+
+ if (pattern_idx_ == 0) {
+ // Start of new pattern iteration, set up clear state by invalidating any
+ // pending frames, so that we don't make an invalid reference to a buffer
+ // containing data from a previous iteration.
+ for (auto& frame : pending_frames_) {
+ frame.expired = true;
+ }
+ }
+
+ if (first_frame) {
+ tl_config = Vp8FrameConfig::GetIntraFrameConfig();
+ } else {
+ // Last is always ok to reference as it contains the base layer. For other
+ // buffers though, we need to check if the buffer has actually been
+ // refreshed this cycle of the temporal pattern. If the encoder dropped
+ // a frame, it might not have.
+ ValidateReferences(&tl_config.golden_buffer_flags,
+ Vp8BufferReference::kGolden);
+ ValidateReferences(&tl_config.arf_buffer_flags,
+ Vp8BufferReference::kAltref);
+    // Update search order to let the encoder know which buffers contain the
+ // most recent data.
+ UpdateSearchOrder(&tl_config);
+    // Figure out if this is a sync frame (non-base-layer frame with only
+ // base-layer references).
+ tl_config.layer_sync = IsSyncFrame(tl_config);
+
+    // Increment frame ages; this needs to stay in sync with `pattern_idx_`,
+    // so it must be updated here. Resetting an age to 0 must be done when
+    // encoding is complete though, so with a pipelining encoder it might lag.
+    // To prevent that data from spilling over into the next iteration, the
+    // `pending_frames_` entries are expired at the start of each pattern
+    // loop. If the delay is constant, the relative ages should still be OK
+    // for the search order.
+ for (size_t& n : frames_since_buffer_refresh_) {
+ ++n;
+ }
+ }
+
+ // Add frame to set of pending frames, awaiting completion.
+ pending_frames_.emplace_back(timestamp, false, GetUpdatedBuffers(tl_config),
+ dependency_info);
+
+ // Checker does not yet support encoder frame dropping, so validate flags
+ // here before they can be dropped.
+ // TODO(sprang): Update checker to support dropping.
+ RTC_DCHECK(checker_->CheckTemporalConfig(first_frame, tl_config));
+
+ return tl_config;
+}
+
+void DefaultTemporalLayers::ValidateReferences(BufferFlags* flags,
+ Vp8BufferReference ref) const {
+  // Check if the buffer specified by `ref` is actually referenced, and if so,
+  // whether it is also a dynamically updated one (buffers that only ever
+  // contain keyframes are always safe to reference).
+ if ((*flags & BufferFlags::kReference) &&
+ !is_static_buffer_[BufferToIndex(ref)]) {
+ if (NumFramesSinceBufferRefresh(ref) >= pattern_idx_) {
+      // No valid buffer state, or the buffer contains a frame that is older
+      // than the current pattern iteration. This reference is not valid, so
+      // remove it.
+ *flags = static_cast<BufferFlags>(*flags & ~BufferFlags::kReference);
+ }
+ }
+}
+
+void DefaultTemporalLayers::UpdateSearchOrder(Vp8FrameConfig* config) {
+ // Figure out which of the buffers we can reference, and order them so that
+  // the most recently refreshed is first. On ties, prioritize 'last' first,
+  // 'golden' second, and 'altref' third.
+ using BufferRefAge = std::pair<Vp8BufferReference, size_t>;
+ std::vector<BufferRefAge> eligible_buffers;
+ if (config->last_buffer_flags & BufferFlags::kReference) {
+ eligible_buffers.emplace_back(
+ Vp8BufferReference::kLast,
+ NumFramesSinceBufferRefresh(Vp8BufferReference::kLast));
+ }
+ if (config->golden_buffer_flags & BufferFlags::kReference) {
+ eligible_buffers.emplace_back(
+ Vp8BufferReference::kGolden,
+ NumFramesSinceBufferRefresh(Vp8BufferReference::kGolden));
+ }
+ if (config->arf_buffer_flags & BufferFlags::kReference) {
+ eligible_buffers.emplace_back(
+ Vp8BufferReference::kAltref,
+ NumFramesSinceBufferRefresh(Vp8BufferReference::kAltref));
+ }
+
+ std::sort(eligible_buffers.begin(), eligible_buffers.end(),
+ [](const BufferRefAge& lhs, const BufferRefAge& rhs) {
+ if (lhs.second != rhs.second) {
+ // Lower count has highest precedence.
+ return lhs.second < rhs.second;
+ }
+ return lhs.first < rhs.first;
+ });
+
+ // Populate the search order fields where possible.
+ if (!eligible_buffers.empty()) {
+ config->first_reference = eligible_buffers.front().first;
+ if (eligible_buffers.size() > 1)
+ config->second_reference = eligible_buffers[1].first;
+ }
+}
+
+size_t DefaultTemporalLayers::NumFramesSinceBufferRefresh(
+ Vp8FrameConfig::Vp8BufferReference ref) const {
+ return frames_since_buffer_refresh_[BufferToIndex(ref)];
+}
+
+void DefaultTemporalLayers::ResetNumFramesSinceBufferRefresh(
+ Vp8FrameConfig::Vp8BufferReference ref) {
+ frames_since_buffer_refresh_[BufferToIndex(ref)] = 0;
+}
+
+void DefaultTemporalLayers::CullPendingFramesBefore(uint32_t timestamp) {
+ while (!pending_frames_.empty() &&
+ pending_frames_.front().timestamp != timestamp) {
+ pending_frames_.pop_front();
+ }
+}
+
+void DefaultTemporalLayers::OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(num_layers_, 0);
+
+ if (size_bytes == 0) {
+ RTC_LOG(LS_WARNING) << "Empty frame; treating as dropped.";
+ OnFrameDropped(stream_index, rtp_timestamp);
+ return;
+ }
+
+ CullPendingFramesBefore(rtp_timestamp);
+ RTC_CHECK(!pending_frames_.empty());
+ PendingFrame& frame = pending_frames_.front();
+ RTC_DCHECK_EQ(frame.timestamp, rtp_timestamp);
+ const Vp8FrameConfig& frame_config = frame.dependency_info.frame_config;
+ if (is_keyframe) {
+ // Signal key-frame so checker resets state.
+ RTC_DCHECK(checker_->CheckTemporalConfig(true, frame_config));
+ }
+
+ CodecSpecificInfoVP8& vp8_info = info->codecSpecific.VP8;
+ if (num_layers_ == 1) {
+ vp8_info.temporalIdx = kNoTemporalIdx;
+ vp8_info.layerSync = false;
+ } else {
+ if (is_keyframe) {
+ // Restart the temporal pattern on keyframes.
+ pattern_idx_ = 0;
+ vp8_info.temporalIdx = 0;
+ vp8_info.layerSync = true; // Keyframes are always sync frames.
+
+ for (Vp8BufferReference buffer : kAllBuffers) {
+ if (is_static_buffer_[BufferToIndex(buffer)]) {
+ // Update frame count of all kf-only buffers, regardless of state of
+ // `pending_frames_`.
+ ResetNumFramesSinceBufferRefresh(buffer);
+ } else {
+        // Key-frames update all buffers; this should be reflected when
+ // updating state in FrameEncoded().
+ frame.updated_buffer_mask |= static_cast<uint8_t>(buffer);
+ }
+ }
+ } else {
+ // Delta frame, update codec specifics with temporal id and sync flag.
+ vp8_info.temporalIdx = frame_config.packetizer_temporal_idx;
+ vp8_info.layerSync = frame_config.layer_sync;
+ }
+ }
+
+ vp8_info.useExplicitDependencies = true;
+ RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
+ RTC_DCHECK_EQ(vp8_info.updatedBuffersCount, 0u);
+
+ GenericFrameInfo& generic_frame_info = info->generic_frame_info.emplace();
+
+ for (int i = 0; i < static_cast<int>(Vp8FrameConfig::Buffer::kCount); ++i) {
+ bool references = false;
+ bool updates = is_keyframe;
+
+ if (!is_keyframe &&
+ frame_config.References(static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.referencedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::referencedBuffers));
+ references = true;
+ vp8_info.referencedBuffers[vp8_info.referencedBuffersCount++] = i;
+ }
+
+ if (is_keyframe ||
+ frame_config.Updates(static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.updatedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::updatedBuffers));
+ updates = true;
+ vp8_info.updatedBuffers[vp8_info.updatedBuffersCount++] = i;
+ }
+
+ if (references || updates) {
+ generic_frame_info.encoder_buffers.emplace_back(i, references, updates);
+ }
+ }
+
+  // The templates are always present on keyframes, and then referred to by
+ // subsequent frames.
+ if (is_keyframe) {
+ info->template_structure = GetTemplateStructure(num_layers_);
+ generic_frame_info.decode_target_indications =
+ temporal_pattern_.front().decode_target_indications;
+ generic_frame_info.temporal_id = 0;
+ } else {
+ generic_frame_info.decode_target_indications =
+ frame.dependency_info.decode_target_indications;
+ generic_frame_info.temporal_id = frame_config.packetizer_temporal_idx;
+ }
+
+ if (!frame.expired) {
+ for (Vp8BufferReference buffer : kAllBuffers) {
+ if (frame.updated_buffer_mask & static_cast<uint8_t>(buffer)) {
+ ResetNumFramesSinceBufferRefresh(buffer);
+ }
+ }
+ }
+
+ pending_frames_.pop_front();
+}
+
+void DefaultTemporalLayers::OnFrameDropped(size_t stream_index,
+ uint32_t rtp_timestamp) {
+ CullPendingFramesBefore(rtp_timestamp);
+ RTC_CHECK(!pending_frames_.empty());
+ RTC_DCHECK_EQ(pending_frames_.front().timestamp, rtp_timestamp);
+ pending_frames_.pop_front();
+}
+
+void DefaultTemporalLayers::OnPacketLossRateUpdate(float packet_loss_rate) {}
+
+void DefaultTemporalLayers::OnRttUpdate(int64_t rtt_ms) {}
+
+void DefaultTemporalLayers::OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) {}
+
+FrameDependencyStructure DefaultTemporalLayers::GetTemplateStructure(
+ int num_layers) const {
+ RTC_CHECK_LT(num_layers, 5);
+ RTC_CHECK_GT(num_layers, 0);
+
+ FrameDependencyStructure template_structure;
+ template_structure.num_decode_targets = num_layers;
+
+ switch (num_layers) {
+ case 1: {
+ template_structure.templates.resize(2);
+ template_structure.templates[0].T(0).Dtis("S");
+ template_structure.templates[1].T(0).Dtis("S").FrameDiffs({1});
+ return template_structure;
+ }
+ case 2: {
+ template_structure.templates.resize(5);
+ template_structure.templates[0].T(0).Dtis("SS");
+ template_structure.templates[1].T(0).Dtis("SS").FrameDiffs({2});
+ template_structure.templates[2].T(0).Dtis("SR").FrameDiffs({2});
+ template_structure.templates[3].T(1).Dtis("-S").FrameDiffs({1});
+ template_structure.templates[4].T(1).Dtis("-D").FrameDiffs({2, 1});
+ return template_structure;
+ }
+ case 3: {
+ if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) {
+ template_structure.templates.resize(5);
+ template_structure.templates[0].T(0).Dtis("SSS");
+ template_structure.templates[1].T(0).Dtis("SSS").FrameDiffs({4});
+ template_structure.templates[2].T(1).Dtis("-DR").FrameDiffs({2});
+ template_structure.templates[3].T(2).Dtis("--S").FrameDiffs({1});
+ template_structure.templates[4].T(2).Dtis("--D").FrameDiffs({2, 1});
+ } else {
+ template_structure.templates.resize(7);
+ template_structure.templates[0].T(0).Dtis("SSS");
+ template_structure.templates[1].T(0).Dtis("SSS").FrameDiffs({4});
+ template_structure.templates[2].T(0).Dtis("SRR").FrameDiffs({4});
+ template_structure.templates[3].T(1).Dtis("-SS").FrameDiffs({2});
+ template_structure.templates[4].T(1).Dtis("-DS").FrameDiffs({4, 2});
+ template_structure.templates[5].T(2).Dtis("--D").FrameDiffs({1});
+ template_structure.templates[6].T(2).Dtis("--D").FrameDiffs({3, 1});
+ }
+ return template_structure;
+ }
+ case 4: {
+ template_structure.templates.resize(8);
+ template_structure.templates[0].T(0).Dtis("SSSS");
+ template_structure.templates[1].T(0).Dtis("SSSS").FrameDiffs({8});
+ template_structure.templates[2].T(1).Dtis("-SRR").FrameDiffs({4});
+ template_structure.templates[3].T(1).Dtis("-SRR").FrameDiffs({4, 8});
+ template_structure.templates[4].T(2).Dtis("--SR").FrameDiffs({2});
+ template_structure.templates[5].T(2).Dtis("--SR").FrameDiffs({2, 4});
+ template_structure.templates[6].T(3).Dtis("---D").FrameDiffs({1});
+ template_structure.templates[7].T(3).Dtis("---D").FrameDiffs({1, 3});
+ return template_structure;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ // To make the compiler happy!
+ return template_structure;
+ }
+}
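+// Reading the templates above (illustrative note, not upstream text): each
+// character of a Dtis string is one decode target, with 'S' = switch,
+// 'D' = discardable, 'R' = required and '-' = not present, and FrameDiffs()
+// lists how many frames back each reference is. E.g.
+// .T(1).Dtis("-D").FrameDiffs({2, 1}) describes a TL1 frame that is
+// discardable for the second decode target and references the frames 2 and 1
+// positions earlier.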
+
+// Returns the list of temporal dependencies for each frame in the temporal
+// pattern. Values are lists of indices in the pattern.
+std::vector<std::set<uint8_t>> GetTemporalDependencies(
+ int num_temporal_layers) {
+ switch (num_temporal_layers) {
+ case 1:
+ return {{0}};
+ case 2:
+ if (!field_trial::IsDisabled("WebRTC-UseShortVP8TL2Pattern")) {
+ return {{2}, {0}, {0}, {1, 2}};
+ } else {
+ return {{6}, {0}, {0}, {1, 2}, {2}, {3, 4}, {4}, {5, 6}};
+ }
+ case 3:
+ if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) {
+ return {{0}, {0}, {0}, {0, 1, 2}};
+ } else {
+ return {{4}, {0}, {0}, {0, 2}, {0}, {2, 4}, {2, 4}, {4, 6}};
+ }
+ case 4:
+ return {{8}, {0}, {0}, {0, 2},
+ {0}, {0, 2, 4}, {0, 2, 4}, {0, 4, 6},
+ {0}, {4, 6, 8}, {4, 6, 8}, {4, 8, 10},
+ {4, 8}, {8, 10, 12}, {8, 10, 12}, {8, 12, 14}};
+ default:
+ RTC_DCHECK_NOTREACHED();
+ return {};
+ }
+}
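+// Illustrative reading (our note, not upstream text): with the short 2-layer
+// pattern {{2}, {0}, {0}, {1, 2}} above, the frame at pattern position 3 (a
+// TL1 frame) may depend on the frames at positions 1 and 2, while position 0
+// may depend on position 2 of the previous pattern cycle.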
+
+DefaultTemporalLayersChecker::DefaultTemporalLayersChecker(
+ int num_temporal_layers)
+ : TemporalLayersChecker(num_temporal_layers),
+ num_layers_(std::max(1, num_temporal_layers)),
+ temporal_ids_(GetTemporalIds(num_layers_)),
+ temporal_dependencies_(GetTemporalDependencies(num_layers_)),
+ pattern_idx_(255) {
+ int i = 0;
+ while (temporal_ids_.size() < temporal_dependencies_.size()) {
+ temporal_ids_.push_back(temporal_ids_[i++]);
+ }
+}
+
+DefaultTemporalLayersChecker::~DefaultTemporalLayersChecker() = default;
+
+bool DefaultTemporalLayersChecker::CheckTemporalConfig(
+ bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config) {
+ if (!TemporalLayersChecker::CheckTemporalConfig(frame_is_keyframe,
+ frame_config)) {
+ return false;
+ }
+ if (frame_config.drop_frame) {
+ return true;
+ }
+
+ if (frame_is_keyframe) {
+ pattern_idx_ = 0;
+ last_ = BufferState();
+ golden_ = BufferState();
+ arf_ = BufferState();
+ return true;
+ }
+
+ ++pattern_idx_;
+ if (pattern_idx_ == temporal_ids_.size()) {
+    // All non-keyframe buffers should be updated each pattern cycle.
+ if (!last_.is_keyframe && !last_.is_updated_this_cycle) {
+ RTC_LOG(LS_ERROR) << "Last buffer was not updated during pattern cycle.";
+ return false;
+ }
+ if (!arf_.is_keyframe && !arf_.is_updated_this_cycle) {
+ RTC_LOG(LS_ERROR) << "Arf buffer was not updated during pattern cycle.";
+ return false;
+ }
+ if (!golden_.is_keyframe && !golden_.is_updated_this_cycle) {
+ RTC_LOG(LS_ERROR)
+ << "Golden buffer was not updated during pattern cycle.";
+ return false;
+ }
+ last_.is_updated_this_cycle = false;
+ arf_.is_updated_this_cycle = false;
+ golden_.is_updated_this_cycle = false;
+ pattern_idx_ = 0;
+ }
+ uint8_t expected_tl_idx = temporal_ids_[pattern_idx_];
+ if (frame_config.packetizer_temporal_idx != expected_tl_idx) {
+ RTC_LOG(LS_ERROR) << "Frame has an incorrect temporal index. Expected: "
+ << static_cast<int>(expected_tl_idx) << " Actual: "
+ << static_cast<int>(frame_config.packetizer_temporal_idx);
+ return false;
+ }
+
+ bool need_sync = temporal_ids_[pattern_idx_] > 0 &&
+ temporal_ids_[pattern_idx_] != kNoTemporalIdx;
+ std::vector<int> dependencies;
+
+ if (frame_config.last_buffer_flags & BufferFlags::kReference) {
+ uint8_t referenced_layer = temporal_ids_[last_.pattern_idx];
+ if (referenced_layer > 0) {
+ need_sync = false;
+ }
+ if (!last_.is_keyframe) {
+ dependencies.push_back(last_.pattern_idx);
+ }
+ } else if (frame_config.first_reference == Vp8BufferReference::kLast ||
+ frame_config.second_reference == Vp8BufferReference::kLast) {
+ RTC_LOG(LS_ERROR)
+ << "Last buffer not referenced, but present in search order.";
+ return false;
+ }
+
+ if (frame_config.arf_buffer_flags & BufferFlags::kReference) {
+ uint8_t referenced_layer = temporal_ids_[arf_.pattern_idx];
+ if (referenced_layer > 0) {
+ need_sync = false;
+ }
+ if (!arf_.is_keyframe) {
+ dependencies.push_back(arf_.pattern_idx);
+ }
+ } else if (frame_config.first_reference == Vp8BufferReference::kAltref ||
+ frame_config.second_reference == Vp8BufferReference::kAltref) {
+ RTC_LOG(LS_ERROR)
+        << "Altref buffer not referenced, but present in search order.";
+ return false;
+ }
+
+ if (frame_config.golden_buffer_flags & BufferFlags::kReference) {
+ uint8_t referenced_layer = temporal_ids_[golden_.pattern_idx];
+ if (referenced_layer > 0) {
+ need_sync = false;
+ }
+ if (!golden_.is_keyframe) {
+ dependencies.push_back(golden_.pattern_idx);
+ }
+ } else if (frame_config.first_reference == Vp8BufferReference::kGolden ||
+ frame_config.second_reference == Vp8BufferReference::kGolden) {
+ RTC_LOG(LS_ERROR)
+ << "Golden buffer not referenced, but present in search order.";
+ return false;
+ }
+
+ if (need_sync != frame_config.layer_sync) {
+ RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
+ << need_sync << " Actual: " << frame_config.layer_sync;
+ return false;
+ }
+
+ if (!frame_is_keyframe) {
+ size_t i;
+ for (i = 0; i < dependencies.size(); ++i) {
+ if (temporal_dependencies_[pattern_idx_].find(dependencies[i]) ==
+ temporal_dependencies_[pattern_idx_].end()) {
+ RTC_LOG(LS_ERROR)
+ << "Illegal temporal dependency out of defined pattern "
+ "from position "
+ << static_cast<int>(pattern_idx_) << " to position "
+ << static_cast<int>(dependencies[i]);
+ return false;
+ }
+ }
+ }
+
+ if (frame_config.last_buffer_flags & BufferFlags::kUpdate) {
+ last_.is_updated_this_cycle = true;
+ last_.pattern_idx = pattern_idx_;
+ last_.is_keyframe = false;
+ }
+ if (frame_config.arf_buffer_flags & BufferFlags::kUpdate) {
+ arf_.is_updated_this_cycle = true;
+ arf_.pattern_idx = pattern_idx_;
+ arf_.is_keyframe = false;
+ }
+ if (frame_config.golden_buffer_flags & BufferFlags::kUpdate) {
+ golden_.is_updated_this_cycle = true;
+ golden_.pattern_idx = pattern_idx_;
+ golden_.is_keyframe = false;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h
new file mode 100644
index 0000000000..bc6574c54c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+/*
+ * This file defines classes for doing temporal layers with VP8.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_DEFAULT_TEMPORAL_LAYERS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_DEFAULT_TEMPORAL_LAYERS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <bitset>
+#include <deque>
+#include <limits>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "modules/video_coding/codecs/vp8/include/temporal_layers_checker.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+class DefaultTemporalLayers final : public Vp8FrameBufferController {
+ public:
+ explicit DefaultTemporalLayers(int number_of_temporal_layers);
+ ~DefaultTemporalLayers() override;
+
+ void SetQpLimits(size_t stream_index, int min_qp, int max_qp) override;
+
+ size_t StreamCount() const override;
+
+ bool SupportsEncoderFrameDropping(size_t stream_index) const override;
+
+  // Returns the recommended VP8 encode flags. May refresh the decoder
+ // and/or update the reference buffers.
+ Vp8FrameConfig NextFrameConfig(size_t stream_index,
+ uint32_t timestamp) override;
+
+ // New target bitrate, per temporal layer.
+ void OnRatesUpdated(size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) override;
+
+ Vp8EncoderConfig UpdateConfiguration(size_t stream_index) override;
+
+  // Callback methods on frame completion. OnEncodeDone() or OnFrameDropped()
+ // should be called once for each NextFrameConfig() call (using the RTP
+ // timestamp as ID), and the calls MUST be in the same order.
+ void OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) override;
+ void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) override;
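+  // Expected call sequence, as an illustrative sketch (not upstream code;
+  // `controller`, `encoded_ok` etc. are hypothetical names):
+  //   Vp8FrameConfig config = controller.NextFrameConfig(0, rtp_timestamp);
+  //   // ... pass the config to the encoder ...
+  //   if (encoded_ok)
+  //     controller.OnEncodeDone(0, rtp_timestamp, size, is_key, qp, info);
+  //   else
+  //     controller.OnFrameDropped(0, rtp_timestamp);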
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+
+ void OnRttUpdate(int64_t rtt_ms) override;
+
+ void OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) override;
+
+ private:
+ static constexpr size_t kNumReferenceBuffers = 3; // Last, golden, altref.
+ struct DependencyInfo {
+ DependencyInfo() = default;
+ DependencyInfo(absl::string_view indication_symbols,
+ Vp8FrameConfig frame_config)
+ : decode_target_indications(
+ webrtc_impl::StringToDecodeTargetIndications(indication_symbols)),
+ frame_config(frame_config) {}
+
+ absl::InlinedVector<DecodeTargetIndication, 10> decode_target_indications;
+ Vp8FrameConfig frame_config;
+ };
+ struct PendingFrame {
+ PendingFrame();
+ PendingFrame(uint32_t timestamp,
+ bool expired,
+ uint8_t updated_buffers_mask,
+ const DependencyInfo& dependency_info);
+ uint32_t timestamp = 0;
+    // Flag indicating if this frame has expired, i.e. it belongs to a
+    // previous iteration of the temporal pattern.
+ bool expired = false;
+ // Bitmask of Vp8BufferReference flags, indicating which buffers this frame
+ // updates.
+ uint8_t updated_buffer_mask = 0;
+ // The frame config returned by NextFrameConfig() for this frame.
+ DependencyInfo dependency_info;
+ };
+
+ static std::vector<DependencyInfo> GetDependencyInfo(size_t num_layers);
+ static std::bitset<kNumReferenceBuffers> DetermineStaticBuffers(
+ const std::vector<DependencyInfo>& temporal_pattern);
+ bool IsSyncFrame(const Vp8FrameConfig& config) const;
+ void ValidateReferences(Vp8FrameConfig::BufferFlags* flags,
+ Vp8FrameConfig::Vp8BufferReference ref) const;
+ void UpdateSearchOrder(Vp8FrameConfig* config);
+ size_t NumFramesSinceBufferRefresh(
+ Vp8FrameConfig::Vp8BufferReference ref) const;
+ void ResetNumFramesSinceBufferRefresh(Vp8FrameConfig::Vp8BufferReference ref);
+ void CullPendingFramesBefore(uint32_t timestamp);
+
+ const size_t num_layers_;
+ const std::vector<unsigned int> temporal_ids_;
+ const std::vector<DependencyInfo> temporal_pattern_;
+ // Per reference buffer flag indicating if it is static, meaning it is only
+ // updated by key-frames.
+ const std::bitset<kNumReferenceBuffers> is_static_buffer_;
+ FrameDependencyStructure GetTemplateStructure(int num_layers) const;
+
+ uint8_t pattern_idx_;
+ // Updated cumulative bitrates, per temporal layer.
+ absl::optional<std::vector<uint32_t>> new_bitrates_bps_;
+
+  // Status for each pending frame, in order of encoding.
+ std::deque<PendingFrame> pending_frames_;
+
+ // One counter per reference buffer, indicating number of frames since last
+  // refresh. For non-base-layer frames (i.e. golden, altref buffers), this is
+ // reset when the pattern loops.
+ std::array<size_t, kNumReferenceBuffers> frames_since_buffer_refresh_;
+
+ // Optional utility used to verify reference validity.
+ std::unique_ptr<TemporalLayersChecker> checker_;
+};
+
+class DefaultTemporalLayersChecker : public TemporalLayersChecker {
+ public:
+ explicit DefaultTemporalLayersChecker(int number_of_temporal_layers);
+ ~DefaultTemporalLayersChecker() override;
+
+ bool CheckTemporalConfig(bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config) override;
+
+ private:
+ struct BufferState {
+ BufferState()
+ : is_updated_this_cycle(false), is_keyframe(true), pattern_idx(0) {}
+
+ bool is_updated_this_cycle;
+ bool is_keyframe;
+ uint8_t pattern_idx;
+ };
+ const size_t num_layers_;
+ std::vector<unsigned int> temporal_ids_;
+ const std::vector<std::set<uint8_t>> temporal_dependencies_;
+ BufferState last_;
+ BufferState arf_;
+ BufferState golden_;
+ uint8_t pattern_idx_;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_DEFAULT_TEMPORAL_LAYERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
new file mode 100644
index 0000000000..ae027a9d8a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -0,0 +1,781 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "vpx/vp8cx.h"
+
+// TODO(bugs.webrtc.org/10582): Test the behavior of UpdateConfiguration().
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using ::testing::Each;
+
+enum {
+ kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
+ kTemporalUpdateGoldenWithoutDependency =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltrefWithoutDependency =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateNoneNoRefAltRef =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateNoneNoRefGolden =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateNoneNoRefGoldenAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_REF_ARF |
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateLastRefAltRef =
+ VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateLastAndGoldenRefAltRef =
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+};
+
+using BufferFlags = Vp8FrameConfig::BufferFlags;
+using Vp8BufferReference = Vp8FrameConfig::Vp8BufferReference;
+
+constexpr uint8_t kNone = static_cast<uint8_t>(Vp8BufferReference::kNone);
+constexpr uint8_t kLast = static_cast<uint8_t>(Vp8BufferReference::kLast);
+constexpr uint8_t kGolden = static_cast<uint8_t>(Vp8BufferReference::kGolden);
+constexpr uint8_t kAltref = static_cast<uint8_t>(Vp8BufferReference::kAltref);
+constexpr uint8_t kAll = kLast | kGolden | kAltref;
+
+constexpr int ToVp8CodecFlags(uint8_t referenced_buffers,
+ uint8_t updated_buffers,
+ bool update_entropy) {
+ return (((referenced_buffers & kLast) == 0) ? VP8_EFLAG_NO_REF_LAST : 0) |
+ (((referenced_buffers & kGolden) == 0) ? VP8_EFLAG_NO_REF_GF : 0) |
+ (((referenced_buffers & kAltref) == 0) ? VP8_EFLAG_NO_REF_ARF : 0) |
+ (((updated_buffers & kLast) == 0) ? VP8_EFLAG_NO_UPD_LAST : 0) |
+ (((updated_buffers & kGolden) == 0) ? VP8_EFLAG_NO_UPD_GF : 0) |
+ (((updated_buffers & kAltref) == 0) ? VP8_EFLAG_NO_UPD_ARF : 0) |
+ (update_entropy ? 0 : VP8_EFLAG_NO_UPD_ENTROPY);
+}
+
+constexpr int kKeyFrameFlags = ToVp8CodecFlags(kNone, kAll, true);
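+// Sanity check (our addition, not upstream code): a key frame references no
+// buffers, updates all of them and updates entropy, so only the three NO_REF
+// flags remain set.
+static_assert(kKeyFrameFlags == (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
+                                 VP8_EFLAG_NO_REF_ARF),
+              "Key frames reference nothing and update all buffers.");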
+
+std::vector<uint32_t> GetTemporalLayerRates(int target_bitrate_kbps,
+ int framerate_fps,
+ int num_temporal_layers) {
+ VideoCodec codec;
+ codec.codecType = VideoCodecType::kVideoCodecVP8;
+ codec.numberOfSimulcastStreams = 1;
+ codec.maxBitrate = target_bitrate_kbps;
+ codec.maxFramerate = framerate_fps;
+ codec.simulcastStream[0].targetBitrate = target_bitrate_kbps;
+ codec.simulcastStream[0].maxBitrate = target_bitrate_kbps;
+ codec.simulcastStream[0].numberOfTemporalLayers = num_temporal_layers;
+ codec.simulcastStream[0].active = true;
+ SimulcastRateAllocator allocator(codec);
+ return allocator
+ .Allocate(
+ VideoBitrateAllocationParameters(target_bitrate_kbps, framerate_fps))
+ .GetTemporalLayerAllocation(0);
+}
+
+constexpr int kDefaultBitrateBps = 500;
+constexpr int kDefaultFramerate = 30;
+constexpr int kDefaultBytesPerFrame =
+ (kDefaultBitrateBps / 8) / kDefaultFramerate;
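+// Note (our addition): with the values above, kDefaultBytesPerFrame works out
+// to (500 / 8) / 30 = 2 via integer division. The tests only need a nonzero
+// size here, since a size of 0 is passed to OnEncodeDone() to simulate a
+// dropped frame.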
+constexpr int kDefaultQp = 2;
+} // namespace
+
+class TemporalLayersTest : public ::testing::Test {
+ public:
+ ~TemporalLayersTest() override = default;
+
+ CodecSpecificInfo* IgnoredCodecSpecificInfo() {
+ codec_specific_info_ = std::make_unique<CodecSpecificInfo>();
+ return codec_specific_info_.get();
+ }
+
+ private:
+ std::unique_ptr<CodecSpecificInfo> codec_specific_info_;
+};
+
+TEST_F(TemporalLayersTest, 2Layers) {
+ constexpr int kNumLayers = 2;
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ constexpr size_t kPatternSize = 4;
+ constexpr size_t kRepetitions = 4;
+
+ const int expected_flags[kPatternSize] = {
+ ToVp8CodecFlags(kLast, kLast, true),
+ ToVp8CodecFlags(kLast, kGolden, true),
+ ToVp8CodecFlags(kLast, kLast, true),
+ ToVp8CodecFlags(kLast | kGolden, kNone, false),
+ };
+ const int expected_temporal_idx[kPatternSize] = {0, 1, 0, 1};
+ const bool expected_layer_sync[kPatternSize] = {false, true, false, false};
+
+ uint32_t timestamp = 0;
+ for (size_t i = 0; i < kPatternSize * kRepetitions; ++i) {
+ const size_t ind = i % kPatternSize;
+ const bool is_keyframe = (i == 0);
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(is_keyframe ? kKeyFrameFlags : expected_flags[ind],
+ LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << i;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, is_keyframe,
+ kDefaultQp, &info);
+ EXPECT_TRUE(checker.CheckTemporalConfig(is_keyframe, tl_config));
+ EXPECT_EQ(expected_temporal_idx[ind], info.codecSpecific.VP8.temporalIdx);
+ EXPECT_EQ(expected_temporal_idx[ind], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[ind], tl_config.encoder_layer_id);
+ EXPECT_EQ(is_keyframe || expected_layer_sync[ind],
+ info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(expected_layer_sync[ind], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+}
+
+TEST_F(TemporalLayersTest, 3Layers) {
+ constexpr int kNumLayers = 3;
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ int expected_flags[16] = {
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefGoldenAltRef,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateGolden,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefGoldenAltRef,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateGolden,
+ kTemporalUpdateNoneNoRefAltRef,
+ };
+ int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
+ 0, 2, 1, 2, 0, 2, 1, 2};
+
+ bool expected_layer_sync[16] = {false, true, true, false, false, false,
+ false, false, false, true, true, false,
+ false, false, false, false};
+
+ unsigned int timestamp = 0;
+ for (int i = 0; i < 16; ++i) {
+ const bool is_keyframe = (i == 0);
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(is_keyframe ? kKeyFrameFlags : expected_flags[i],
+ LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << i;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, is_keyframe,
+ kDefaultQp, &info);
+ EXPECT_TRUE(checker.CheckTemporalConfig(is_keyframe, tl_config));
+ EXPECT_EQ(expected_temporal_idx[i], info.codecSpecific.VP8.temporalIdx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.encoder_layer_id);
+ EXPECT_EQ(is_keyframe || expected_layer_sync[i],
+ info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(expected_layer_sync[i], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+}
+
+TEST_F(TemporalLayersTest, Alternative3Layers) {
+ constexpr int kNumLayers = 3;
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ int expected_flags[8] = {kTemporalUpdateLast,
+ kTemporalUpdateAltrefWithoutDependency,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNone,
+ kTemporalUpdateLast,
+ kTemporalUpdateAltrefWithoutDependency,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNone};
+ int expected_temporal_idx[8] = {0, 2, 1, 2, 0, 2, 1, 2};
+
+ bool expected_layer_sync[8] = {false, true, true, false,
+ false, true, true, false};
+
+ unsigned int timestamp = 0;
+ for (int i = 0; i < 8; ++i) {
+ const bool is_keyframe = (i == 0);
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(is_keyframe ? kKeyFrameFlags : expected_flags[i],
+ LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << i;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, is_keyframe,
+ kDefaultQp, &info);
+ EXPECT_TRUE(checker.CheckTemporalConfig(is_keyframe, tl_config));
+ EXPECT_EQ(expected_temporal_idx[i], info.codecSpecific.VP8.temporalIdx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[i], tl_config.encoder_layer_id);
+ EXPECT_EQ(is_keyframe || expected_layer_sync[i],
+ info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(expected_layer_sync[i], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+}
+
+TEST_F(TemporalLayersTest, SearchOrder) {
+ constexpr int kNumLayers = 3;
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+ // Tl 0, 1, 2 update last, golden, altref respectively.
+
+ // Start with a key-frame. tl_config flags can be ignored.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame. First one only references TL0. Updates altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kLast);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kNone);
+
+ // TL1 frame. Can only reference TL0. Updated golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kLast);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kNone);
+
+ // TL2 frame. Can reference all three buffers. Golden was the last to be
+ // updated, the next to last was altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kGolden);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kAltref);
+}
+
+TEST_F(TemporalLayersTest, SearchOrderWithDrop) {
+ constexpr int kNumLayers = 3;
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+ // Tl 0, 1, 2 update last, golden, altref respectively.
+
+ // Start with a key-frame. tl_config flags can be ignored.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame. First one only references TL0. Updates altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kLast);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kNone);
+
+ // Dropped TL1 frame. Can only reference TL0. Should have updated golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+  // TL2 frame. Can normally reference all three buffers, but golden has not
+  // been populated this cycle. Altref was the last to be updated, and before
+  // that, last.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_EQ(tl_config.first_reference, Vp8BufferReference::kAltref);
+ EXPECT_EQ(tl_config.second_reference, Vp8BufferReference::kLast);
+}
+
+TEST_F(TemporalLayersTest, DoesNotReferenceDroppedFrames) {
+ constexpr int kNumLayers = 3;
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+ // Tl 0, 1, 2 update last, golden, altref respectively.
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Start with a keyframe.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Dropped TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // Dropped TL1 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // TL2 frame. Can reference all three buffers, valid since golden and altref
+ // both contain the last keyframe.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+
+ // Restart of cycle!
+
+ // TL0 base layer frame, updating and referencing last.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame, updating altref.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL1 frame, updating golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+  // TL2 frame. Can still reference all buffers since they have been updated
+  // this cycle.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+
+ // Restart of cycle!
+
+ // TL0 base layer frame, updating and referencing last.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Dropped TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // Dropped TL1 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+
+ // TL2 frame. This time golden and altref contain data from the previous cycle
+ // and cannot be referenced.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+}
+
+TEST_F(TemporalLayersTest, DoesNotReferenceUnlessGuaranteedToExist) {
+ constexpr int kNumLayers = 3;
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+  // Tl 0, 1 update last, golden respectively. Altref always holds the last
+  // keyframe.
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Start with a keyframe.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Do a full cycle of the pattern.
+ for (int i = 0; i < 7; ++i) {
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+
+ // TL0 base layer frame, starting the cycle over.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Encoder has a hiccup and builds a queue, so frame encoding is delayed.
+ // TL1 frame, updating golden.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+
+  // TL2 frame that would normally reference golden, but since we can't be
+  // certain the pending TL1 frame won't be dropped, that is not allowed.
+ tl_config = tl.NextFrameConfig(0, timestamp + 1);
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+
+ // TL0 base layer frame.
+ tl_config = tl.NextFrameConfig(0, timestamp + 2);
+
+  // The previous three enqueued frames finally get encoded, and the updated
+  // buffers are now OK to reference.
+ // Enqueued TL1 frame ready.
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ // Enqueued TL2 frame.
+ tl.OnEncodeDone(0, ++timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ // Enqueued TL0 frame.
+ tl.OnEncodeDone(0, ++timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame, all buffers are now in a known good state, OK to reference.
+ tl_config = tl.NextFrameConfig(0, ++timestamp + 1);
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_TRUE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+}
+
+TEST_F(TemporalLayersTest, DoesNotReferenceUnlessGuaranteedToExistLongDelay) {
+ constexpr int kNumLayers = 3;
+ // Use a repeating pattern of tl 0, 2, 1, 2.
+  // Tl 0, 1, 2 update last, golden, altref respectively.
+ ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Start with a keyframe.
+ uint32_t timestamp = 0;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Do a full cycle of the pattern.
+ for (int i = 0; i < 3; ++i) {
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+
+ // TL0 base layer frame, starting the cycle over.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // TL2 frame.
+ tl_config = tl.NextFrameConfig(0, ++timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+ // Encoder has a hiccup and builds a queue, so frame encoding is delayed.
+ // Encoded, but delayed frames in TL 1, 2.
+ tl_config = tl.NextFrameConfig(0, timestamp + 1);
+ tl_config = tl.NextFrameConfig(0, timestamp + 2);
+
+ // Restart of the pattern!
+
+ // Encoded, but delayed frames in TL 2, 1.
+ tl_config = tl.NextFrameConfig(0, timestamp + 3);
+ tl_config = tl.NextFrameConfig(0, timestamp + 4);
+
+ // TL1 frame from last cycle is ready.
+ tl.OnEncodeDone(0, timestamp + 1, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ // TL2 frame from last cycle is ready.
+ tl.OnEncodeDone(0, timestamp + 2, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+  // TL2 frame that should reference all buffers, but altref and golden have
+  // not been updated this cycle. (Don't be fooled by the late frames from
+  // the last cycle!)
+ tl_config = tl.NextFrameConfig(0, timestamp + 5);
+ EXPECT_TRUE(tl_config.last_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.golden_buffer_flags & BufferFlags::kReference);
+ EXPECT_FALSE(tl_config.arf_buffer_flags & BufferFlags::kReference);
+}
+
+TEST_F(TemporalLayersTest, KeyFrame) {
+ constexpr int kNumLayers = 3;
+ DefaultTemporalLayers tl(kNumLayers);
+ DefaultTemporalLayersChecker checker(kNumLayers);
+ tl.OnRatesUpdated(0,
+ GetTemporalLayerRates(kDefaultBytesPerFrame,
+ kDefaultFramerate, kNumLayers),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ int expected_flags[8] = {
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNoneNoRefGoldenAltRef,
+ kTemporalUpdateGoldenWithoutDependency,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateLast,
+ kTemporalUpdateNoneNoRefAltRef,
+ kTemporalUpdateGolden,
+ kTemporalUpdateNone,
+ };
+ int expected_temporal_idx[8] = {0, 2, 1, 2, 0, 2, 1, 2};
+ bool expected_layer_sync[8] = {true, true, true, false,
+ false, false, false, false};
+
+ uint32_t timestamp = 0;
+ for (int i = 0; i < 7; ++i) {
+    // Temporal pattern starts from 0 after a key frame. Let the first `i`
+    // frames be delta frames, followed by a key frame.
+ for (int j = 1; j <= i; ++j) {
+ // Since last frame was always a keyframe and thus index 0 in the pattern,
+ // this loop starts at index 1.
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ EXPECT_EQ(expected_flags[j], LibvpxVp8Encoder::EncodeFlags(tl_config))
+ << j;
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ EXPECT_TRUE(checker.CheckTemporalConfig(false, tl_config));
+ EXPECT_EQ(expected_temporal_idx[j], tl_config.packetizer_temporal_idx);
+ EXPECT_EQ(expected_temporal_idx[j], tl_config.encoder_layer_id);
+ EXPECT_EQ(expected_layer_sync[j], tl_config.layer_sync);
+ timestamp += 3000;
+ }
+
+ CodecSpecificInfo info;
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp);
+ tl.OnEncodeDone(0, timestamp, kDefaultBytesPerFrame, true, kDefaultQp,
+ &info);
+ EXPECT_TRUE(info.codecSpecific.VP8.layerSync)
+ << "Key frame should be marked layer sync.";
+ EXPECT_EQ(0, info.codecSpecific.VP8.temporalIdx)
+ << "Key frame should always be packetized as layer 0";
+ EXPECT_EQ(0, info.generic_frame_info->temporal_id)
+ << "Key frame should always be packetized as layer 0";
+ EXPECT_THAT(info.generic_frame_info->decode_target_indications,
+ Each(DecodeTargetIndication::kSwitch))
+ << "Key frame is universal switch";
+ EXPECT_TRUE(checker.CheckTemporalConfig(true, tl_config));
+ }
+}
+
+TEST_F(TemporalLayersTest, SetsTlCountOnFirstConfigUpdate) {
+ // Create an instance and fetch config update without setting any rate.
+ constexpr int kNumLayers = 2;
+ DefaultTemporalLayers tl(kNumLayers);
+ Vp8EncoderConfig config = tl.UpdateConfiguration(0);
+
+ // Config should indicate correct number of temporal layers, but zero bitrate.
+ ASSERT_TRUE(config.temporal_layer_config.has_value());
+ EXPECT_EQ(config.temporal_layer_config->ts_number_layers,
+ uint32_t{kNumLayers});
+ std::array<uint32_t, Vp8EncoderConfig::TemporalLayerConfig::kMaxLayers>
+ kZeroRate = {};
+ EXPECT_EQ(config.temporal_layer_config->ts_target_bitrate, kZeroRate);
+
+ // On second call, no new update.
+ config = tl.UpdateConfiguration(0);
+ EXPECT_FALSE(config.temporal_layer_config.has_value());
+}
+
+class TemporalLayersReferenceTest : public TemporalLayersTest,
+ public ::testing::WithParamInterface<int> {
+ public:
+ TemporalLayersReferenceTest()
+ : timestamp_(1),
+ last_sync_timestamp_(timestamp_),
+ tl0_reference_(nullptr) {}
+ virtual ~TemporalLayersReferenceTest() {}
+
+ protected:
+ static const int kMaxPatternLength = 32;
+
+ struct BufferState {
+ BufferState() : BufferState(-1, 0, false) {}
+ BufferState(int temporal_idx, uint32_t timestamp, bool sync)
+ : temporal_idx(temporal_idx), timestamp(timestamp), sync(sync) {}
+ int temporal_idx;
+ uint32_t timestamp;
+ bool sync;
+ };
+
+ bool UpdateSyncRefState(const BufferFlags& flags, BufferState* buffer_state) {
+ if (flags & BufferFlags::kReference) {
+ if (buffer_state->temporal_idx == -1)
+ return true; // References key-frame.
+ if (buffer_state->temporal_idx == 0) {
+ // No more than one reference to TL0 frame.
+ EXPECT_EQ(nullptr, tl0_reference_);
+ tl0_reference_ = buffer_state;
+ return true;
+ }
+ return false; // References higher layer.
+ }
+ return true; // No reference, does not affect sync frame status.
+ }
+
+ void ValidateReference(const BufferFlags& flags,
+ const BufferState& buffer_state,
+ int temporal_layer) {
+ if (flags & BufferFlags::kReference) {
+ if (temporal_layer > 0 && buffer_state.timestamp > 0) {
+ // Check that high layer reference does not go past last sync frame.
+ EXPECT_GE(buffer_state.timestamp, last_sync_timestamp_);
+ }
+ // No reference to buffer in higher layer.
+ EXPECT_LE(buffer_state.temporal_idx, temporal_layer);
+ }
+ }
+
+ uint32_t timestamp_ = 1;
+ uint32_t last_sync_timestamp_ = timestamp_;
+ BufferState* tl0_reference_;
+
+ BufferState last_state;
+ BufferState golden_state;
+ BufferState altref_state;
+};
+
+INSTANTIATE_TEST_SUITE_P(DefaultTemporalLayersTest,
+ TemporalLayersReferenceTest,
+ ::testing::Range(1, kMaxTemporalStreams + 1));
+
+TEST_P(TemporalLayersReferenceTest, ValidFrameConfigs) {
+ const int num_layers = GetParam();
+ DefaultTemporalLayers tl(num_layers);
+ tl.OnRatesUpdated(
+ 0, GetTemporalLayerRates(kDefaultBytesPerFrame, kDefaultFramerate, 1),
+ kDefaultFramerate);
+ tl.UpdateConfiguration(0);
+
+ // Run through the pattern and store the frame dependencies, plus keep track
+  // of the buffer state: which buffer references which temporal layer (if
+  // any). If a given buffer is never updated, it is legal to reference it
+ // even for sync frames. In order to be general, don't assume TL0 always
+ // updates `last`.
+ std::vector<Vp8FrameConfig> tl_configs(kMaxPatternLength);
+ for (int i = 0; i < kMaxPatternLength; ++i) {
+ Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp_);
+ tl.OnEncodeDone(0, timestamp_, kDefaultBytesPerFrame, i == 0, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ ++timestamp_;
+ EXPECT_FALSE(tl_config.drop_frame);
+ tl_configs.push_back(tl_config);
+ int temporal_idx = tl_config.encoder_layer_id;
+ // For the default layers, always keep encoder and rtp layers in sync.
+ EXPECT_EQ(tl_config.packetizer_temporal_idx, temporal_idx);
+
+    // Determine if this frame is in a higher layer but references only TL0
+    // or untouched buffers; if so, verify it is marked as a layer sync.
+ bool is_sync_frame = true;
+ tl0_reference_ = nullptr;
+ if (temporal_idx <= 0) {
+ is_sync_frame = false; // TL0 by definition not a sync frame.
+ } else if (!UpdateSyncRefState(tl_config.last_buffer_flags, &last_state)) {
+ is_sync_frame = false;
+ } else if (!UpdateSyncRefState(tl_config.golden_buffer_flags,
+ &golden_state)) {
+ is_sync_frame = false;
+ } else if (!UpdateSyncRefState(tl_config.arf_buffer_flags, &altref_state)) {
+ is_sync_frame = false;
+ }
+ if (is_sync_frame) {
+ // Cache timestamp for last found sync frame, so that we can verify no
+ // references back past this frame.
+ ASSERT_TRUE(tl0_reference_);
+ last_sync_timestamp_ = tl0_reference_->timestamp;
+ }
+ EXPECT_EQ(tl_config.layer_sync, is_sync_frame);
+
+    // Validate no reference from a lower to a higher temporal layer, or
+    // backwards past the last sync frame.
+ ValidateReference(tl_config.last_buffer_flags, last_state, temporal_idx);
+ ValidateReference(tl_config.golden_buffer_flags, golden_state,
+ temporal_idx);
+ ValidateReference(tl_config.arf_buffer_flags, altref_state, temporal_idx);
+
+ // Update the current layer state.
+ BufferState state = {temporal_idx, timestamp_, is_sync_frame};
+ if (tl_config.last_buffer_flags & BufferFlags::kUpdate)
+ last_state = state;
+ if (tl_config.golden_buffer_flags & BufferFlags::kUpdate)
+ golden_state = state;
+ if (tl_config.arf_buffer_flags & BufferFlags::kUpdate)
+ altref_state = state;
+ }
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h
new file mode 100644
index 0000000000..3d1671a676
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/temporal_layers_checker.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_TEMPORAL_LAYERS_CHECKER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_TEMPORAL_LAYERS_CHECKER_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+
+namespace webrtc {
+
+// Interface for a class that verifies correctness of temporal layer
+// configurations (dependencies, sync flag, etc).
+// Intended to be used in tests as well as with real apps in debug mode.
+class TemporalLayersChecker {
+ public:
+ explicit TemporalLayersChecker(int num_temporal_layers);
+ virtual ~TemporalLayersChecker() {}
+
+ virtual bool CheckTemporalConfig(bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config);
+
+ static std::unique_ptr<TemporalLayersChecker> CreateTemporalLayersChecker(
+ Vp8TemporalLayersType type,
+ int num_temporal_layers);
+
+ private:
+ struct BufferState {
+ BufferState() : is_keyframe(true), temporal_layer(0), sequence_number(0) {}
+ bool is_keyframe;
+ uint8_t temporal_layer;
+ uint32_t sequence_number;
+ };
+ bool CheckAndUpdateBufferState(BufferState* state,
+ bool* need_sync,
+ bool frame_is_keyframe,
+ uint8_t temporal_layer,
+ Vp8FrameConfig::BufferFlags flags,
+ uint32_t sequence_number,
+ uint32_t* lowest_sequence_referenced);
+ BufferState last_;
+ BufferState arf_;
+ BufferState golden_;
+ int num_temporal_layers_;
+ uint32_t sequence_number_;
+ uint32_t last_sync_sequence_number_;
+ uint32_t last_tl0_sequence_number_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_TEMPORAL_LAYERS_CHECKER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
new file mode 100644
index 0000000000..2fc647874f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_frame_buffer_controller.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// TODO(brandtr): Move these interfaces to the api/ folder.
+class VP8Encoder {
+ public:
+ struct Settings {
+ // Allows for overriding the Vp8FrameBufferController used by the encoder.
+ // If unset, a default Vp8FrameBufferController will be instantiated
+ // internally.
+ std::unique_ptr<Vp8FrameBufferControllerFactory>
+ frame_buffer_controller_factory = nullptr;
+
+ // Allows for overriding the resolution/bitrate limits exposed through
+ // VideoEncoder::GetEncoderInfo(). No override is done if empty.
+ std::vector<VideoEncoder::ResolutionBitrateLimits>
+ resolution_bitrate_limits = {};
+ };
+
+ static std::unique_ptr<VideoEncoder> Create();
+ static std::unique_ptr<VideoEncoder> Create(Settings settings);
+};
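+// Example use (illustrative sketch, not upstream code; `my_limits` is a
+// hypothetical variable):
+//   VP8Encoder::Settings settings;
+//   settings.resolution_bitrate_limits = my_limits;  // Optional override.
+//   std::unique_ptr<VideoEncoder> encoder =
+//       VP8Encoder::Create(std::move(settings));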
+
+class VP8Decoder {
+ public:
+ static std::unique_ptr<VideoDecoder> Create();
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h
new file mode 100644
index 0000000000..1fab5f45a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8_globals.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+
+namespace webrtc {
+
+struct RTPVideoHeaderVP8 {
+ void InitRTPVideoHeaderVP8() {
+ nonReference = false;
+ pictureId = kNoPictureId;
+ tl0PicIdx = kNoTl0PicIdx;
+ temporalIdx = kNoTemporalIdx;
+ layerSync = false;
+ keyIdx = kNoKeyIdx;
+ partitionId = 0;
+ beginningOfPartition = false;
+ }
+
+ bool nonReference; // Frame is discardable.
+ int16_t pictureId; // Picture ID index, 15 bits;
+ // kNoPictureId if PictureID does not exist.
+ int16_t tl0PicIdx; // TL0PIC_IDX, 8 bits;
+ // kNoTl0PicIdx means no value provided.
+ uint8_t temporalIdx; // Temporal layer index, or kNoTemporalIdx.
+ bool layerSync; // This frame is a layer sync frame.
+ // Disabled if temporalIdx == kNoTemporalIdx.
+ int keyIdx; // 5 bits; kNoKeyIdx means not used.
+ int partitionId; // VP8 partition ID
+  bool beginningOfPartition; // True if this packet is the first
+                             // in a VP8 partition; otherwise false.
+};
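+// Typical use (illustrative sketch, not upstream code): reset every field to
+// its "not present" value, then fill in what is known for the frame.
+//   RTPVideoHeaderVP8 vp8_header;
+//   vp8_header.InitRTPVideoHeaderVP8();
+//   vp8_header.temporalIdx = 1;
+//   vp8_header.layerSync = true;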
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
new file mode 100644
index 0000000000..3fe86f2f85
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "vpx/vp8.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+
+namespace webrtc {
+namespace {
+constexpr int kVp8ErrorPropagationTh = 30;
+// vpx_decoder.h documents the decode deadline as a time in us, with "Set to
+// zero for unlimited.", but the actual implementation treats it as a mode,
+// where 0 allows delay and 1 does not.
+constexpr long kDecodeDeadlineRealtime = 1; // NOLINT
+
+const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Config-Arm";
+const char kVp8PostProcFieldTrial[] = "WebRTC-VP8-Postproc-Config";
+
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+constexpr bool kIsArm = true;
+#else
+constexpr bool kIsArm = false;
+#endif
+
+absl::optional<LibvpxVp8Decoder::DeblockParams> DefaultDeblockParams() {
+ return LibvpxVp8Decoder::DeblockParams(/*max_level=*/8,
+ /*degrade_qp=*/60,
+ /*min_qp=*/30);
+}
+
+absl::optional<LibvpxVp8Decoder::DeblockParams>
+GetPostProcParamsFromFieldTrialGroup() {
+ std::string group = webrtc::field_trial::FindFullName(
+ kIsArm ? kVp8PostProcArmFieldTrial : kVp8PostProcFieldTrial);
+ if (group.empty()) {
+ return DefaultDeblockParams();
+ }
+
+ LibvpxVp8Decoder::DeblockParams params;
+ if (sscanf(group.c_str(), "Enabled-%d,%d,%d", &params.max_level,
+ &params.min_qp, &params.degrade_qp) != 3) {
+ return DefaultDeblockParams();
+ }
+
+ if (params.max_level < 0 || params.max_level > 16) {
+ return DefaultDeblockParams();
+ }
+
+ if (params.min_qp < 0 || params.degrade_qp <= params.min_qp) {
+ return DefaultDeblockParams();
+ }
+
+ return params;
+}
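+// For example (our note): a field trial value of "Enabled-5,30,60" yields
+// max_level=5, min_qp=30 and degrade_qp=60 (note that the sscanf above reads
+// min_qp before degrade_qp, unlike the constructor argument order), which
+// passes all the sanity checks; any value that fails them falls back to
+// DefaultDeblockParams().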
+
+} // namespace
+
+std::unique_ptr<VideoDecoder> VP8Decoder::Create() {
+ return std::make_unique<LibvpxVp8Decoder>();
+}
+
+class LibvpxVp8Decoder::QpSmoother {
+ public:
+ QpSmoother() : last_sample_ms_(rtc::TimeMillis()), smoother_(kAlpha) {}
+
+ int GetAvg() const {
+ float value = smoother_.filtered();
+ return (value == rtc::ExpFilter::kValueUndefined) ? 0
+ : static_cast<int>(value);
+ }
+
+ void Add(float sample) {
+ int64_t now_ms = rtc::TimeMillis();
+ smoother_.Apply(static_cast<float>(now_ms - last_sample_ms_), sample);
+ last_sample_ms_ = now_ms;
+ }
+
+ void Reset() { smoother_.Reset(kAlpha); }
+
+ private:
+ const float kAlpha = 0.95f;
+ int64_t last_sample_ms_;
+ rtc::ExpFilter smoother_;
+};
+
+LibvpxVp8Decoder::LibvpxVp8Decoder()
+ : use_postproc_(
+ kIsArm ? webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial)
+ : true),
+      buffer_pool_(false, 300 /* max_number_of_buffers */),
+ decode_complete_callback_(NULL),
+ inited_(false),
+ decoder_(NULL),
+ propagation_cnt_(-1),
+ last_frame_width_(0),
+ last_frame_height_(0),
+ key_frame_required_(true),
+ deblock_params_(use_postproc_ ? GetPostProcParamsFromFieldTrialGroup()
+ : absl::nullopt),
+ qp_smoother_(use_postproc_ ? new QpSmoother() : nullptr) {}
+
+LibvpxVp8Decoder::~LibvpxVp8Decoder() {
+ inited_ = true; // in order to do the actual release
+ Release();
+}
+
+bool LibvpxVp8Decoder::Configure(const Settings& settings) {
+ if (Release() < 0) {
+ return false;
+ }
+ if (decoder_ == NULL) {
+ decoder_ = new vpx_codec_ctx_t;
+ memset(decoder_, 0, sizeof(*decoder_));
+ }
+ vpx_codec_dec_cfg_t cfg;
+ // Setting number of threads to a constant value (1)
+ cfg.threads = 1;
+ cfg.h = cfg.w = 0; // set after decode
+
+ vpx_codec_flags_t flags = use_postproc_ ? VPX_CODEC_USE_POSTPROC : 0;
+
+ if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
+ delete decoder_;
+ decoder_ = nullptr;
+ return false;
+ }
+
+ propagation_cnt_ = -1;
+ inited_ = true;
+
+ // Always start with a complete key frame.
+ key_frame_required_ = true;
+ if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
+ if (!buffer_pool_.Resize(*buffer_pool_size)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (decode_complete_callback_ == NULL) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (input_image.data() == NULL && input_image.size() > 0) {
+ // Reset to avoid requesting key frames too often.
+ if (propagation_cnt_ > 0)
+ propagation_cnt_ = 0;
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // Post process configurations.
+ if (use_postproc_) {
+ vp8_postproc_cfg_t ppcfg;
+ // MFQE enabled to reduce key frame popping.
+ ppcfg.post_proc_flag = VP8_MFQE;
+
+ if (kIsArm) {
+ RTC_DCHECK(deblock_params_.has_value());
+ }
+ if (deblock_params_.has_value()) {
+ // For low resolutions, use stronger deblocking filter.
+ int last_width_x_height = last_frame_width_ * last_frame_height_;
+ if (last_width_x_height > 0 && last_width_x_height <= 320 * 240) {
+ // Enable the deblock and demacroblocker based on qp thresholds.
+ RTC_DCHECK(qp_smoother_);
+ int qp = qp_smoother_->GetAvg();
+ if (qp > deblock_params_->min_qp) {
+ int level = deblock_params_->max_level;
+ if (qp < deblock_params_->degrade_qp) {
+ // Use lower level.
+ level = deblock_params_->max_level *
+ (qp - deblock_params_->min_qp) /
+ (deblock_params_->degrade_qp - deblock_params_->min_qp);
+ }
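+          // E.g. with the defaults (max_level=8, min_qp=30, degrade_qp=60),
+          // an average qp of 45 gives level = 8 * 15 / 30 = 4.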
+ // Deblocking level only affects VP8_DEMACROBLOCK.
+ ppcfg.deblocking_level = std::max(level, 1);
+ ppcfg.post_proc_flag |= VP8_DEBLOCK | VP8_DEMACROBLOCK;
+ }
+ }
+ } else {
+      // Non-ARM with no explicit deblock params set.
+ ppcfg.post_proc_flag |= VP8_DEBLOCK;
+ // For VGA resolutions and lower, enable the demacroblocker postproc.
+ if (last_frame_width_ * last_frame_height_ <= 640 * 360) {
+ ppcfg.post_proc_flag |= VP8_DEMACROBLOCK;
+ }
+      // Strength of the deblocking filter. Valid range: [0, 16].
+ ppcfg.deblocking_level = 3;
+ }
+
+ vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
+ }
+
+ // Always start with a complete key frame.
+ if (key_frame_required_) {
+ if (input_image._frameType != VideoFrameType::kVideoFrameKey)
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ key_frame_required_ = false;
+ }
+ // Restrict error propagation using key frame requests.
+ // Reset on a key frame refresh.
+ if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
+ propagation_cnt_ = -1;
+ // Start count on first loss.
+ } else if (missing_frames && propagation_cnt_ == -1) {
+ propagation_cnt_ = 0;
+ }
+ if (propagation_cnt_ >= 0) {
+ propagation_cnt_++;
+ }
+
+ vpx_codec_iter_t iter = NULL;
+ vpx_image_t* img;
+ int ret;
+
+ // Check for missing frames.
+ if (missing_frames) {
+ // Call decoder with zero data length to signal missing frames.
+ if (vpx_codec_decode(decoder_, NULL, 0, 0, kDecodeDeadlineRealtime)) {
+ // Reset to avoid requesting key frames too often.
+ if (propagation_cnt_ > 0)
+ propagation_cnt_ = 0;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ img = vpx_codec_get_frame(decoder_, &iter);
+ iter = NULL;
+ }
+
+ const uint8_t* buffer = input_image.data();
+ if (input_image.size() == 0) {
+ buffer = NULL; // Triggers full frame concealment.
+ }
+ if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
+ kDecodeDeadlineRealtime)) {
+ // Reset to avoid requesting key frames too often.
+ if (propagation_cnt_ > 0) {
+ propagation_cnt_ = 0;
+ }
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ img = vpx_codec_get_frame(decoder_, &iter);
+ int qp;
+ vpx_codec_err_t vpx_ret =
+ vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
+ RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
+ ret = ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
+ if (ret != 0) {
+ // Reset to avoid requesting key frames too often.
+ if (ret < 0 && propagation_cnt_ > 0)
+ propagation_cnt_ = 0;
+ return ret;
+ }
+  // Check against the error propagation threshold.
+ if (propagation_cnt_ > kVp8ErrorPropagationTh) {
+ // Reset to avoid requesting key frames too often.
+ propagation_cnt_ = 0;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp8Decoder::ReturnFrame(
+ const vpx_image_t* img,
+ uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space) {
+ if (img == NULL) {
+ // Decoder OK and NULL image => No show frame
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+ if (qp_smoother_) {
+ if (last_frame_width_ != static_cast<int>(img->d_w) ||
+ last_frame_height_ != static_cast<int>(img->d_h)) {
+ qp_smoother_->Reset();
+ }
+ qp_smoother_->Add(qp);
+ }
+ last_frame_width_ = img->d_w;
+ last_frame_height_ = img->d_h;
+ // Allocate memory for decoded image.
+ rtc::scoped_refptr<VideoFrameBuffer> buffer;
+
+ rtc::scoped_refptr<I420Buffer> i420_buffer =
+ buffer_pool_.CreateI420Buffer(img->d_w, img->d_h);
+ buffer = i420_buffer;
+ if (i420_buffer.get()) {
+ libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ i420_buffer->MutableDataY(), i420_buffer->StrideY(),
+ i420_buffer->MutableDataU(), i420_buffer->StrideU(),
+ i420_buffer->MutableDataV(), i420_buffer->StrideV(),
+ img->d_w, img->d_h);
+ }
+
+ if (!buffer.get()) {
+ // Pool has too many pending frames.
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Video.LibvpxVp8Decoder.TooManyPendingFrames",
+ 1);
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ VideoFrame decoded_image = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(timestamp)
+ .set_color_space(explicit_color_space)
+ .build();
+ decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp8Decoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decode_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp8Decoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ if (decoder_ != NULL) {
+ if (inited_) {
+ if (vpx_codec_destroy(decoder_)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ delete decoder_;
+ decoder_ = NULL;
+ }
+ buffer_pool_.Release();
+ inited_ = false;
+ return ret_val;
+}
+
+VideoDecoder::DecoderInfo LibvpxVp8Decoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = "libvpx";
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* LibvpxVp8Decoder::ImplementationName() const {
+ return "libvpx";
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
new file mode 100644
index 0000000000..f9acd70bad
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_decoder.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+
+namespace webrtc {
+
+class LibvpxVp8Decoder : public VideoDecoder {
+ public:
+ LibvpxVp8Decoder();
+ ~LibvpxVp8Decoder() override;
+
+ bool Configure(const Settings& settings) override;
+ int Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) override;
+
+ int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
+ int Release() override;
+
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ struct DeblockParams {
+ DeblockParams() : max_level(6), degrade_qp(1), min_qp(0) {}
+ DeblockParams(int max_level, int degrade_qp, int min_qp)
+ : max_level(max_level), degrade_qp(degrade_qp), min_qp(min_qp) {}
+ int max_level; // Deblocking strength: [0, 16].
+    int degrade_qp;  // If QP is below this value, start lowering `max_level`.
+    int min_qp;      // If QP is below this value, turn off deblocking.
+ };
+
+ private:
+ class QpSmoother;
+ int ReturnFrame(const vpx_image_t* img,
+                  uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space);
+ const bool use_postproc_;
+
+ VideoFrameBufferPool buffer_pool_;
+ DecodedImageCallback* decode_complete_callback_;
+ bool inited_;
+ vpx_codec_ctx_t* decoder_;
+ int propagation_cnt_;
+ int last_frame_width_;
+ int last_frame_height_;
+ bool key_frame_required_;
+ const absl::optional<DeblockParams> deblock_params_;
+ const std::unique_ptr<QpSmoother> qp_smoother_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
new file mode 100644
index 0000000000..cc84605ce7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -0,0 +1,1438 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "modules/video_coding/utility/simulcast_utility.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/field_trial_units.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/field_trial.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+#include "vpx/vp8cx.h"
+
+namespace webrtc {
+namespace {
+#if defined(WEBRTC_IOS)
+constexpr char kVP8IosMaxNumberOfThreadFieldTrial[] =
+ "WebRTC-VP8IosMaxNumberOfThread";
+constexpr char kVP8IosMaxNumberOfThreadFieldTrialParameter[] = "max_thread";
+#endif
+
+constexpr char kVp8ForcePartitionResilience[] =
+ "WebRTC-VP8-ForcePartitionResilience";
+
+// QP is obtained from VP8-bitstream for HW, so the QP corresponds to the
+// bitstream range of [0, 127] and not the user-level range of [0, 63].
+constexpr int kLowVp8QpThreshold = 29;
+constexpr int kHighVp8QpThreshold = 95;
+
+constexpr int kTokenPartitions = VP8_ONE_TOKENPARTITION;
+constexpr uint32_t kVp832ByteAlign = 32u;
+
+constexpr int kRtpTicksPerSecond = 90000;
+constexpr int kRtpTicksPerMs = kRtpTicksPerSecond / 1000;
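+// E.g. at 30 fps, each frame advances the RTP timestamp by 90000 / 30 = 3000
+// ticks.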
+
+// VP8 denoiser states.
+enum denoiserState : uint32_t {
+ kDenoiserOff,
+ kDenoiserOnYOnly,
+ kDenoiserOnYUV,
+ kDenoiserOnYUVAggressive,
+ // Adaptive mode defaults to kDenoiserOnYUV on key frame, but may switch
+ // to kDenoiserOnYUVAggressive based on a computed noise metric.
+ kDenoiserOnAdaptive
+};
+
+// Greatest common divisor, computed with the Euclidean algorithm.
+int GCD(int a, int b) {
+ int c = a % b;
+ while (c != 0) {
+ a = b;
+ b = c;
+ c = a % b;
+ }
+ return b;
+}
+
+static_assert(Vp8EncoderConfig::TemporalLayerConfig::kMaxPeriodicity ==
+ VPX_TS_MAX_PERIODICITY,
+ "Vp8EncoderConfig::kMaxPeriodicity must be kept in sync with the "
+ "constant in libvpx.");
+static_assert(Vp8EncoderConfig::TemporalLayerConfig::kMaxLayers ==
+ VPX_TS_MAX_LAYERS,
+ "Vp8EncoderConfig::kMaxLayers must be kept in sync with the "
+ "constant in libvpx.");
+
+// Allow a newer value to override a current value only if the new value
+// is set.
+template <typename T>
+bool MaybeSetNewValue(const absl::optional<T>& new_value,
+ absl::optional<T>* base_value) {
+ if (new_value.has_value() && new_value != *base_value) {
+ *base_value = new_value;
+ return true;
+ } else {
+ return false;
+ }
+}
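+// E.g. a set new_value of 5 differing from the current base overwrites
+// `base_value` and returns true, while an unset (nullopt) new_value leaves
+// `base_value` untouched and returns false.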
+
+// Adds configuration from `new_config` to `base_config`. Both configs consist
+// of optionals, and only optionals which are set in `new_config` can have
+// an effect. (That is, set values in `base_config` cannot be unset.)
+// Returns `true` iff any changes were made to `base_config`.
+bool MaybeExtendVp8EncoderConfig(const Vp8EncoderConfig& new_config,
+ Vp8EncoderConfig* base_config) {
+ bool changes_made = false;
+ changes_made |= MaybeSetNewValue(new_config.temporal_layer_config,
+ &base_config->temporal_layer_config);
+ changes_made |= MaybeSetNewValue(new_config.rc_target_bitrate,
+ &base_config->rc_target_bitrate);
+ changes_made |= MaybeSetNewValue(new_config.rc_max_quantizer,
+ &base_config->rc_max_quantizer);
+ changes_made |= MaybeSetNewValue(new_config.g_error_resilient,
+ &base_config->g_error_resilient);
+ return changes_made;
+}
+
+void ApplyVp8EncoderConfigToVpxConfig(const Vp8EncoderConfig& encoder_config,
+ vpx_codec_enc_cfg_t* vpx_config) {
+ if (encoder_config.temporal_layer_config.has_value()) {
+ const Vp8EncoderConfig::TemporalLayerConfig& ts_config =
+ encoder_config.temporal_layer_config.value();
+ vpx_config->ts_number_layers = ts_config.ts_number_layers;
+ std::copy(ts_config.ts_target_bitrate.begin(),
+ ts_config.ts_target_bitrate.end(),
+ std::begin(vpx_config->ts_target_bitrate));
+ std::copy(ts_config.ts_rate_decimator.begin(),
+ ts_config.ts_rate_decimator.end(),
+ std::begin(vpx_config->ts_rate_decimator));
+ vpx_config->ts_periodicity = ts_config.ts_periodicity;
+ std::copy(ts_config.ts_layer_id.begin(), ts_config.ts_layer_id.end(),
+ std::begin(vpx_config->ts_layer_id));
+ } else {
+ vpx_config->ts_number_layers = 1;
+ vpx_config->ts_rate_decimator[0] = 1;
+ vpx_config->ts_periodicity = 1;
+ vpx_config->ts_layer_id[0] = 0;
+ }
+
+ if (encoder_config.rc_target_bitrate.has_value()) {
+ vpx_config->rc_target_bitrate = encoder_config.rc_target_bitrate.value();
+ }
+
+ if (encoder_config.rc_max_quantizer.has_value()) {
+ vpx_config->rc_max_quantizer = encoder_config.rc_max_quantizer.value();
+ }
+
+ if (encoder_config.g_error_resilient.has_value()) {
+ vpx_config->g_error_resilient = encoder_config.g_error_resilient.value();
+ }
+}
+
+bool IsCompatibleVideoFrameBufferType(VideoFrameBuffer::Type left,
+ VideoFrameBuffer::Type right) {
+ if (left == VideoFrameBuffer::Type::kI420 ||
+ left == VideoFrameBuffer::Type::kI420A) {
+ // LibvpxVp8Encoder does not care about the alpha channel, I420A and I420
+ // are considered compatible.
+ return right == VideoFrameBuffer::Type::kI420 ||
+ right == VideoFrameBuffer::Type::kI420A;
+ }
+ return left == right;
+}
+
+void SetRawImagePlanes(vpx_image_t* raw_image, VideoFrameBuffer* buffer) {
+ switch (buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A: {
+ const I420BufferInterface* i420_buffer = buffer->GetI420();
+ RTC_DCHECK(i420_buffer);
+ raw_image->planes[VPX_PLANE_Y] =
+ const_cast<uint8_t*>(i420_buffer->DataY());
+ raw_image->planes[VPX_PLANE_U] =
+ const_cast<uint8_t*>(i420_buffer->DataU());
+ raw_image->planes[VPX_PLANE_V] =
+ const_cast<uint8_t*>(i420_buffer->DataV());
+ raw_image->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
+ raw_image->stride[VPX_PLANE_U] = i420_buffer->StrideU();
+ raw_image->stride[VPX_PLANE_V] = i420_buffer->StrideV();
+ break;
+ }
+ case VideoFrameBuffer::Type::kNV12: {
+ const NV12BufferInterface* nv12_buffer = buffer->GetNV12();
+ RTC_DCHECK(nv12_buffer);
+ raw_image->planes[VPX_PLANE_Y] =
+ const_cast<uint8_t*>(nv12_buffer->DataY());
+ raw_image->planes[VPX_PLANE_U] =
+ const_cast<uint8_t*>(nv12_buffer->DataUV());
+ raw_image->planes[VPX_PLANE_V] = raw_image->planes[VPX_PLANE_U] + 1;
+ raw_image->stride[VPX_PLANE_Y] = nv12_buffer->StrideY();
+ raw_image->stride[VPX_PLANE_U] = nv12_buffer->StrideUV();
+ raw_image->stride[VPX_PLANE_V] = nv12_buffer->StrideUV();
+ break;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+}
+
+} // namespace
+
+std::unique_ptr<VideoEncoder> VP8Encoder::Create() {
+ return std::make_unique<LibvpxVp8Encoder>(LibvpxInterface::Create(),
+ VP8Encoder::Settings());
+}
+
+std::unique_ptr<VideoEncoder> VP8Encoder::Create(
+ VP8Encoder::Settings settings) {
+ return std::make_unique<LibvpxVp8Encoder>(LibvpxInterface::Create(),
+ std::move(settings));
+}
+
+vpx_enc_frame_flags_t LibvpxVp8Encoder::EncodeFlags(
+ const Vp8FrameConfig& references) {
+ RTC_DCHECK(!references.drop_frame);
+
+ vpx_enc_frame_flags_t flags = 0;
+
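+  // E.g. a config that only references the last buffer and updates nothing
+  // yields NO_UPD_LAST | NO_REF_GF | NO_UPD_GF | NO_REF_ARF | NO_UPD_ARF.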
+ if ((references.last_buffer_flags &
+ Vp8FrameConfig::BufferFlags::kReference) == 0)
+ flags |= VP8_EFLAG_NO_REF_LAST;
+ if ((references.last_buffer_flags & Vp8FrameConfig::BufferFlags::kUpdate) ==
+ 0)
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ if ((references.golden_buffer_flags &
+ Vp8FrameConfig::BufferFlags::kReference) == 0)
+ flags |= VP8_EFLAG_NO_REF_GF;
+ if ((references.golden_buffer_flags & Vp8FrameConfig::BufferFlags::kUpdate) ==
+ 0)
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ if ((references.arf_buffer_flags & Vp8FrameConfig::BufferFlags::kReference) ==
+ 0)
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ if ((references.arf_buffer_flags & Vp8FrameConfig::BufferFlags::kUpdate) == 0)
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ if (references.freeze_entropy)
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+
+ return flags;
+}
+
+LibvpxVp8Encoder::LibvpxVp8Encoder(std::unique_ptr<LibvpxInterface> interface,
+ VP8Encoder::Settings settings)
+ : libvpx_(std::move(interface)),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
+ frame_buffer_controller_factory_(
+ std::move(settings.frame_buffer_controller_factory)),
+ resolution_bitrate_limits_(std::move(settings.resolution_bitrate_limits)),
+ key_frame_request_(kMaxSimulcastStreams, false),
+ variable_framerate_experiment_(ParseVariableFramerateConfig(
+ "WebRTC-VP8VariableFramerateScreenshare")),
+ framerate_controller_(variable_framerate_experiment_.framerate_limit) {
+ // TODO(eladalon/ilnik): These reservations might be wasting memory.
+ // InitEncode() is resizing to the actual size, which might be smaller.
+ raw_images_.reserve(kMaxSimulcastStreams);
+ encoded_images_.reserve(kMaxSimulcastStreams);
+ send_stream_.reserve(kMaxSimulcastStreams);
+ cpu_speed_.assign(kMaxSimulcastStreams, cpu_speed_default_);
+ encoders_.reserve(kMaxSimulcastStreams);
+ vpx_configs_.reserve(kMaxSimulcastStreams);
+ config_overrides_.reserve(kMaxSimulcastStreams);
+ downsampling_factors_.reserve(kMaxSimulcastStreams);
+}
+
+LibvpxVp8Encoder::~LibvpxVp8Encoder() {
+ Release();
+}
+
+int LibvpxVp8Encoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ encoded_images_.clear();
+
+ if (inited_) {
+ for (auto it = encoders_.rbegin(); it != encoders_.rend(); ++it) {
+ if (libvpx_->codec_destroy(&*it)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ }
+ encoders_.clear();
+
+ vpx_configs_.clear();
+ config_overrides_.clear();
+ send_stream_.clear();
+ cpu_speed_.clear();
+
+ for (auto it = raw_images_.rbegin(); it != raw_images_.rend(); ++it) {
+ libvpx_->img_free(&*it);
+ }
+ raw_images_.clear();
+
+ frame_buffer_controller_.reset();
+ inited_ = false;
+ return ret_val;
+}
+
+void LibvpxVp8Encoder::SetRates(const RateControlParameters& parameters) {
+ if (!inited_) {
+ RTC_LOG(LS_WARNING) << "SetRates() while not initialize";
+ return;
+ }
+
+ if (encoders_[0].err) {
+ RTC_LOG(LS_WARNING) << "Encoder in error state.";
+ return;
+ }
+
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Unsupported framerate (must be >= 1.0): "
+ << parameters.framerate_fps;
+ return;
+ }
+
+ if (parameters.bitrate.get_sum_bps() == 0) {
+ // Encoder paused, turn off all encoding.
+    const int num_streams = static_cast<int>(encoders_.size());
+ for (int i = 0; i < num_streams; ++i)
+ SetStreamState(false, i);
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+
+ if (encoders_.size() > 1) {
+ // If we have more than 1 stream, reduce the qp_max for the low resolution
+ // stream if frame rate is not too low. The trade-off with lower qp_max is
+ // possibly more dropped frames, so we only do this if the frame rate is
+ // above some threshold (base temporal layer is down to 1/4 for 3 layers).
+ // We may want to condition this on bitrate later.
+ if (rate_control_settings_.Vp8BoostBaseLayerQuality() &&
+ parameters.framerate_fps > 20.0) {
+ vpx_configs_[encoders_.size() - 1].rc_max_quantizer = 45;
+ } else {
+ // Go back to default value set in InitEncode.
+ vpx_configs_[encoders_.size() - 1].rc_max_quantizer = qp_max_;
+ }
+ }
+
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ const size_t stream_idx = encoders_.size() - 1 - i;
+
+ unsigned int target_bitrate_kbps =
+ parameters.bitrate.GetSpatialLayerSum(stream_idx) / 1000;
+
+ bool send_stream = target_bitrate_kbps > 0;
+ if (send_stream || encoders_.size() > 1)
+ SetStreamState(send_stream, stream_idx);
+
+ vpx_configs_[i].rc_target_bitrate = target_bitrate_kbps;
+ if (send_stream) {
+ frame_buffer_controller_->OnRatesUpdated(
+ stream_idx, parameters.bitrate.GetTemporalLayerAllocation(stream_idx),
+ static_cast<int>(parameters.framerate_fps + 0.5));
+ }
+
+ UpdateVpxConfiguration(stream_idx);
+
+ vpx_codec_err_t err =
+ libvpx_->codec_enc_config_set(&encoders_[i], &vpx_configs_[i]);
+ if (err != VPX_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Error configuring codec, error code: " << err
+ << ", details: "
+ << libvpx_->codec_error_detail(&encoders_[i]);
+ }
+ }
+}
+
+void LibvpxVp8Encoder::OnPacketLossRateUpdate(float packet_loss_rate) {
+ // TODO(bugs.webrtc.org/10431): Replace condition by DCHECK.
+ if (frame_buffer_controller_) {
+ frame_buffer_controller_->OnPacketLossRateUpdate(packet_loss_rate);
+ }
+}
+
+void LibvpxVp8Encoder::OnRttUpdate(int64_t rtt_ms) {
+ // TODO(bugs.webrtc.org/10431): Replace condition by DCHECK.
+ if (frame_buffer_controller_) {
+ frame_buffer_controller_->OnRttUpdate(rtt_ms);
+ }
+}
+
+void LibvpxVp8Encoder::OnLossNotification(
+ const LossNotification& loss_notification) {
+ if (frame_buffer_controller_) {
+ frame_buffer_controller_->OnLossNotification(loss_notification);
+ }
+}
+
+void LibvpxVp8Encoder::SetStreamState(bool send_stream, int stream_idx) {
+ if (send_stream && !send_stream_[stream_idx]) {
+ // Need a key frame if we have not sent this stream before.
+ key_frame_request_[stream_idx] = true;
+ }
+ send_stream_[stream_idx] = send_stream;
+}
+
+void LibvpxVp8Encoder::SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) {
+ // TODO(bugs.webrtc.org/10769): Update downstream and remove ability to
+ // pass nullptr.
+ // RTC_DCHECK(fec_controller_override);
+ RTC_DCHECK(!fec_controller_override_);
+ fec_controller_override_ = fec_controller_override;
+}
+
+// TODO(eladalon): s/inst/codec_settings/g.
+int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
+ const VideoEncoder::Settings& settings) {
+ if (inst == NULL) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+  // Allow zero to represent an unspecified maxBitrate.
+ if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->width < 1 || inst->height < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ if (absl::optional<ScalabilityMode> scalability_mode =
+ inst->GetScalabilityMode();
+ scalability_mode.has_value() &&
+ !VP8SupportsScalabilityMode(*scalability_mode)) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ num_active_streams_ = 0;
+ for (int i = 0; i < inst->numberOfSimulcastStreams; ++i) {
+ if (inst->simulcastStream[i].active) {
+ ++num_active_streams_;
+ }
+ }
+ if (inst->numberOfSimulcastStreams == 0 && inst->active) {
+ num_active_streams_ = 1;
+ }
+
+ if (inst->VP8().automaticResizeOn && num_active_streams_ > 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // Use the previous pixel format to avoid extra image allocations.
+ vpx_img_fmt_t pixel_format =
+ raw_images_.empty() ? VPX_IMG_FMT_I420 : raw_images_[0].fmt;
+
+ int retVal = Release();
+ if (retVal < 0) {
+ return retVal;
+ }
+
+ int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
+ if (number_of_streams > 1 &&
+ !SimulcastUtility::ValidSimulcastParameters(*inst, number_of_streams)) {
+ return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
+ }
+
+ RTC_DCHECK(!frame_buffer_controller_);
+ if (frame_buffer_controller_factory_) {
+ frame_buffer_controller_ = frame_buffer_controller_factory_->Create(
+ *inst, settings, fec_controller_override_);
+ } else {
+ Vp8TemporalLayersFactory factory;
+ frame_buffer_controller_ =
+ factory.Create(*inst, settings, fec_controller_override_);
+ }
+ RTC_DCHECK(frame_buffer_controller_);
+
+ number_of_cores_ = settings.number_of_cores;
+ timestamp_ = 0;
+ codec_ = *inst;
+
+ // Code expects simulcastStream resolutions to be correct, make sure they are
+ // filled even when there are no simulcast layers.
+ if (codec_.numberOfSimulcastStreams == 0) {
+ codec_.simulcastStream[0].width = codec_.width;
+ codec_.simulcastStream[0].height = codec_.height;
+ }
+
+ encoded_images_.resize(number_of_streams);
+ encoders_.resize(number_of_streams);
+ vpx_configs_.resize(number_of_streams);
+ config_overrides_.resize(number_of_streams);
+ downsampling_factors_.resize(number_of_streams);
+ raw_images_.resize(number_of_streams);
+ send_stream_.resize(number_of_streams);
+ send_stream_[0] = true; // For non-simulcast case.
+ cpu_speed_.resize(number_of_streams);
+ std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
+
+ int idx = number_of_streams - 1;
+ for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
+ int gcd = GCD(inst->simulcastStream[idx].width,
+ inst->simulcastStream[idx - 1].width);
+ downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
+ downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
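+    // E.g. adjacent stream widths of 1280 (idx) and 640 (idx - 1) give
+    // gcd 640, so num = 2 and den = 1, i.e. a 2:1 downsampling factor.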
+ send_stream_[i] = false;
+ }
+ if (number_of_streams > 1) {
+ send_stream_[number_of_streams - 1] = false;
+ downsampling_factors_[number_of_streams - 1].num = 1;
+ downsampling_factors_[number_of_streams - 1].den = 1;
+ }
+
+  // Populate the encoder configuration with default values.
+ if (libvpx_->codec_enc_config_default(vpx_codec_vp8_cx(), &vpx_configs_[0],
+ 0)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+  // Set the time base of the codec.
+ vpx_configs_[0].g_timebase.num = 1;
+ vpx_configs_[0].g_timebase.den = kRtpTicksPerSecond;
+  vpx_configs_[0].g_lag_in_frames = 0;  // 0 = no frame lagging.
+
+ // Set the error resilience mode for temporal layers (but not simulcast).
+ vpx_configs_[0].g_error_resilient =
+ (SimulcastUtility::NumberOfTemporalLayers(*inst, 0) > 1)
+ ? VPX_ERROR_RESILIENT_DEFAULT
+ : 0;
+
+ // Override the error resilience mode if this is not simulcast, but we are
+ // using temporal layers.
+ if (field_trial::IsEnabled(kVp8ForcePartitionResilience) &&
+ (number_of_streams == 1) &&
+ (SimulcastUtility::NumberOfTemporalLayers(*inst, 0) > 1)) {
+ RTC_LOG(LS_INFO) << "Overriding g_error_resilient from "
+ << vpx_configs_[0].g_error_resilient << " to "
+ << VPX_ERROR_RESILIENT_PARTITIONS;
+ vpx_configs_[0].g_error_resilient = VPX_ERROR_RESILIENT_PARTITIONS;
+ }
+
+  // Rate control settings.
+ vpx_configs_[0].rc_dropframe_thresh = FrameDropThreshold(0);
+ vpx_configs_[0].rc_end_usage = VPX_CBR;
+ vpx_configs_[0].g_pass = VPX_RC_ONE_PASS;
+ // Handle resizing outside of libvpx.
+ vpx_configs_[0].rc_resize_allowed = 0;
+ vpx_configs_[0].rc_min_quantizer =
+ codec_.mode == VideoCodecMode::kScreensharing ? 12 : 2;
+ if (inst->qpMax >= vpx_configs_[0].rc_min_quantizer) {
+ qp_max_ = inst->qpMax;
+ }
+ if (rate_control_settings_.LibvpxVp8QpMax()) {
+ qp_max_ = std::max(rate_control_settings_.LibvpxVp8QpMax().value(),
+ static_cast<int>(vpx_configs_[0].rc_min_quantizer));
+ }
+ vpx_configs_[0].rc_max_quantizer = qp_max_;
+ vpx_configs_[0].rc_undershoot_pct = 100;
+ vpx_configs_[0].rc_overshoot_pct = 15;
+ vpx_configs_[0].rc_buf_initial_sz = 500;
+ vpx_configs_[0].rc_buf_optimal_sz = 600;
+ vpx_configs_[0].rc_buf_sz = 1000;
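+  // The rc_buf_* sizes above are expressed in milliseconds of data at the
+  // target bitrate.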
+
+ // Set the maximum target size of any key-frame.
+ rc_max_intra_target_ = MaxIntraTarget(vpx_configs_[0].rc_buf_optimal_sz);
+
+ if (inst->VP8().keyFrameInterval > 0) {
+ vpx_configs_[0].kf_mode = VPX_KF_AUTO;
+ vpx_configs_[0].kf_max_dist = inst->VP8().keyFrameInterval;
+ } else {
+ vpx_configs_[0].kf_mode = VPX_KF_DISABLED;
+ }
+
+ // Allow the user to set the complexity for the base stream.
+ switch (inst->GetVideoEncoderComplexity()) {
+ case VideoCodecComplexity::kComplexityHigh:
+ cpu_speed_[0] = -5;
+ break;
+ case VideoCodecComplexity::kComplexityHigher:
+ cpu_speed_[0] = -4;
+ break;
+ case VideoCodecComplexity::kComplexityMax:
+ cpu_speed_[0] = -3;
+ break;
+ default:
+ cpu_speed_[0] = -6;
+ break;
+ }
+ cpu_speed_default_ = cpu_speed_[0];
+ // Set encoding complexity (cpu_speed) based on resolution and/or platform.
+ cpu_speed_[0] = GetCpuSpeed(inst->width, inst->height);
+ for (int i = 1; i < number_of_streams; ++i) {
+ cpu_speed_[i] =
+ GetCpuSpeed(inst->simulcastStream[number_of_streams - 1 - i].width,
+ inst->simulcastStream[number_of_streams - 1 - i].height);
+ }
+ vpx_configs_[0].g_w = inst->width;
+ vpx_configs_[0].g_h = inst->height;
+
+ // Determine number of threads based on the image size and #cores.
+ // TODO(fbarchard): Consider number of Simulcast layers.
+ vpx_configs_[0].g_threads = NumberOfThreads(
+ vpx_configs_[0].g_w, vpx_configs_[0].g_h, settings.number_of_cores);
+
+ // Creating a wrapper to the image - setting image data to NULL.
+ // Actual pointer will be set in encode. Setting align to 1, as it
+ // is meaningless (no memory allocation is done here).
+ libvpx_->img_wrap(&raw_images_[0], pixel_format, inst->width, inst->height, 1,
+ NULL);
+
+  // Note that the order we use differs from webm: we have the lowest
+  // resolution at position 0, while they have the highest resolution at
+  // position 0.
+ const size_t stream_idx_cfg_0 = encoders_.size() - 1;
+ SimulcastRateAllocator init_allocator(codec_);
+ VideoBitrateAllocation allocation =
+ init_allocator.Allocate(VideoBitrateAllocationParameters(
+ inst->startBitrate * 1000, inst->maxFramerate));
+ std::vector<uint32_t> stream_bitrates;
+ for (int i = 0; i == 0 || i < inst->numberOfSimulcastStreams; ++i) {
+ uint32_t bitrate = allocation.GetSpatialLayerSum(i) / 1000;
+ stream_bitrates.push_back(bitrate);
+ }
+
+ vpx_configs_[0].rc_target_bitrate = stream_bitrates[stream_idx_cfg_0];
+ if (stream_bitrates[stream_idx_cfg_0] > 0) {
+ uint32_t maxFramerate =
+ inst->simulcastStream[stream_idx_cfg_0].maxFramerate;
+ if (!maxFramerate) {
+ maxFramerate = inst->maxFramerate;
+ }
+
+ frame_buffer_controller_->OnRatesUpdated(
+ stream_idx_cfg_0,
+ allocation.GetTemporalLayerAllocation(stream_idx_cfg_0), maxFramerate);
+ }
+ frame_buffer_controller_->SetQpLimits(stream_idx_cfg_0,
+ vpx_configs_[0].rc_min_quantizer,
+ vpx_configs_[0].rc_max_quantizer);
+ UpdateVpxConfiguration(stream_idx_cfg_0);
+ vpx_configs_[0].rc_dropframe_thresh = FrameDropThreshold(stream_idx_cfg_0);
+
+ for (size_t i = 1; i < encoders_.size(); ++i) {
+ const size_t stream_idx = encoders_.size() - 1 - i;
+ memcpy(&vpx_configs_[i], &vpx_configs_[0], sizeof(vpx_configs_[0]));
+
+ vpx_configs_[i].g_w = inst->simulcastStream[stream_idx].width;
+ vpx_configs_[i].g_h = inst->simulcastStream[stream_idx].height;
+
+ // Use 1 thread for lower resolutions.
+ vpx_configs_[i].g_threads = 1;
+
+ vpx_configs_[i].rc_dropframe_thresh = FrameDropThreshold(stream_idx);
+
+    // Set alignment to 32, as that ensures at least 16 for all planes
+    // (32 for Y, 16 for U and V). Libvpx uses the requested stride for
+    // the Y plane, but only half of it for the U and V planes.
+ libvpx_->img_alloc(
+ &raw_images_[i], pixel_format, inst->simulcastStream[stream_idx].width,
+ inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
+ SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
+ vpx_configs_[i].rc_target_bitrate = stream_bitrates[stream_idx];
+ if (stream_bitrates[stream_idx] > 0) {
+ uint32_t maxFramerate = inst->simulcastStream[stream_idx].maxFramerate;
+ if (!maxFramerate) {
+ maxFramerate = inst->maxFramerate;
+ }
+ frame_buffer_controller_->OnRatesUpdated(
+ stream_idx, allocation.GetTemporalLayerAllocation(stream_idx),
+ maxFramerate);
+ }
+ frame_buffer_controller_->SetQpLimits(stream_idx,
+ vpx_configs_[i].rc_min_quantizer,
+ vpx_configs_[i].rc_max_quantizer);
+ UpdateVpxConfiguration(stream_idx);
+ }
+
+ return InitAndSetControlSettings();
+}
+
+int LibvpxVp8Encoder::GetCpuSpeed(int width, int height) {
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+  // On mobile platforms, use a lower speed setting for lower resolutions on
+  // CPUs with 4 or more cores.
+ RTC_DCHECK_GT(number_of_cores_, 0);
+ if (experimental_cpu_speed_config_arm_
+ .GetValue(width * height, number_of_cores_)
+ .has_value()) {
+ return experimental_cpu_speed_config_arm_
+ .GetValue(width * height, number_of_cores_)
+ .value();
+ }
+
+ if (number_of_cores_ <= 3)
+ return -12;
+
+ if (width * height <= 352 * 288)
+ return -8;
+ else if (width * height <= 640 * 480)
+ return -10;
+ else
+ return -12;
+#else
+ // For non-ARM, increase encoding complexity (i.e., use lower speed setting)
+ // if resolution is below CIF. Otherwise, keep the default/user setting
+ // (`cpu_speed_default_`) set on InitEncode via VP8().complexity.
+ if (width * height < 352 * 288)
+ return (cpu_speed_default_ < -4) ? -4 : cpu_speed_default_;
+ else
+ return cpu_speed_default_;
+#endif
+}
+
+int LibvpxVp8Encoder::NumberOfThreads(int width, int height, int cpus) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+ if (width * height >= 320 * 180) {
+ if (cpus >= 4) {
+      // Use 3 threads for CPUs with 4 or more cores, since most of the time
+      // only 4 cores will be active.
+ return 3;
+ } else if (cpus == 3 || cpus == 2) {
+ return 2;
+ } else {
+ return 1;
+ }
+ }
+ return 1;
+#else
+#if defined(WEBRTC_IOS)
+ std::string trial_string =
+ field_trial::FindFullName(kVP8IosMaxNumberOfThreadFieldTrial);
+ FieldTrialParameter<int> max_thread_number(
+ kVP8IosMaxNumberOfThreadFieldTrialParameter, 0);
+ ParseFieldTrial({&max_thread_number}, trial_string);
+ if (max_thread_number.Get() > 0) {
+ if (width * height < 320 * 180) {
+      return 1;  // Use a single thread for small screens.
+ }
+    // The thread count must be less than or equal to the number of CPUs.
+ return std::min(cpus, max_thread_number.Get());
+ }
+#endif // defined(WEBRTC_IOS)
+ if (width * height >= 1920 * 1080 && cpus > 8) {
+ return 8; // 8 threads for 1080p on high perf machines.
+ } else if (width * height > 1280 * 960 && cpus >= 6) {
+ // 3 threads for 1080p.
+ return 3;
+ } else if (width * height > 640 * 480 && cpus >= 3) {
+ // Default 2 threads for qHD/HD, but allow 3 if core count is high enough,
+ // as this will allow more margin for high-core/low clock machines or if
+ // not built with highest optimization.
+ if (cpus >= 6) {
+ return 3;
+ }
+ return 2;
+ } else {
+ // 1 thread for VGA or less.
+ return 1;
+ }
+#endif
+}
+
+int LibvpxVp8Encoder::InitAndSetControlSettings() {
+ vpx_codec_flags_t flags = 0;
+ flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
+
+ if (encoders_.size() > 1) {
+ int error = libvpx_->codec_enc_init_multi(
+ &encoders_[0], vpx_codec_vp8_cx(), &vpx_configs_[0], encoders_.size(),
+ flags, &downsampling_factors_[0]);
+ if (error) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ } else {
+ if (libvpx_->codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+ &vpx_configs_[0], flags)) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ }
+ // Enable denoising for the highest resolution stream, and for
+ // the second highest resolution if we are doing more than 2
+ // spatial layers/streams.
+ // TODO(holmer): Investigate possibility of adding a libvpx API
+ // for getting the denoised frame from the encoder and using that
+ // when encoding lower resolution streams. Would it work with the
+ // multi-res encoding feature?
+ denoiserState denoiser_state = kDenoiserOnYOnly;
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID) || defined(WEBRTC_ARCH_MIPS)
+ denoiser_state = kDenoiserOnYOnly;
+#else
+ denoiser_state = kDenoiserOnAdaptive;
+#endif
+ libvpx_->codec_control(
+ &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.VP8()->denoisingOn ? denoiser_state : kDenoiserOff);
+ if (encoders_.size() > 2) {
+ libvpx_->codec_control(
+ &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.VP8()->denoisingOn ? denoiser_state : kDenoiserOff);
+ }
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ // Allow more screen content to be detected as static.
+ libvpx_->codec_control(
+ &(encoders_[i]), VP8E_SET_STATIC_THRESHOLD,
+ codec_.mode == VideoCodecMode::kScreensharing ? 100u : 1u);
+ libvpx_->codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]);
+ libvpx_->codec_control(
+ &(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS,
+ static_cast<vp8e_token_partitions>(kTokenPartitions));
+ libvpx_->codec_control(&(encoders_[i]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target_);
+ // VP8E_SET_SCREEN_CONTENT_MODE 2 = screen content with more aggressive
+ // rate control (drop frames on large target bitrate overshoot)
+ libvpx_->codec_control(
+ &(encoders_[i]), VP8E_SET_SCREEN_CONTENT_MODE,
+ codec_.mode == VideoCodecMode::kScreensharing ? 2u : 0u);
+ }
+ inited_ = true;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+uint32_t LibvpxVp8Encoder::MaxIntraTarget(uint32_t optimalBuffersize) {
+ // Set max to the optimal buffer level (normalized by target BR),
+ // and scaled by a scalePar.
+ // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
+  // This value is expressed as a percentage of perFrameBw:
+ // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
+ // The target in % is as follows:
+
+ float scalePar = 0.5;
+ uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10;
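+  // E.g. with rc_buf_optimal_sz = 600 and 30 fps this gives
+  // 600 * 0.5 * 30 / 10 = 900, i.e. key frames may use up to 9x the average
+  // per-frame bandwidth.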
+
+ // Don't go below 3 times the per frame bandwidth.
+ const uint32_t minIntraTh = 300;
+ return (targetPct < minIntraTh) ? minIntraTh : targetPct;
+}
+
+uint32_t LibvpxVp8Encoder::FrameDropThreshold(size_t spatial_idx) const {
+ if (!codec_.GetFrameDropEnabled()) {
+ return 0;
+ }
+
+ // If temporal layers are used, they get to override the frame dropping
+  // setting, as e.g. ScreenshareLayers does not work as intended with frame
+ // dropping on and DefaultTemporalLayers will have performance issues with
+ // frame dropping off.
+ RTC_DCHECK(frame_buffer_controller_);
+ RTC_DCHECK_LT(spatial_idx, frame_buffer_controller_->StreamCount());
+ return frame_buffer_controller_->SupportsEncoderFrameDropping(spatial_idx)
+ ? 30
+ : 0;
+}
+
+size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) {
+ const int encoder_id = encoders_.size() - 1 - sid;
+ size_t bitrate_bps;
+ float fps;
+ if ((SimulcastUtility::IsConferenceModeScreenshare(codec_) && sid == 0) ||
+ vpx_configs_[encoder_id].ts_number_layers <= 1) {
+ // In conference screenshare there's no defined per temporal layer bitrate
+ // and framerate.
+ bitrate_bps = vpx_configs_[encoder_id].rc_target_bitrate * 1000;
+ fps = codec_.maxFramerate;
+ } else {
+ bitrate_bps = vpx_configs_[encoder_id].ts_target_bitrate[tid] * 1000;
+ fps = codec_.maxFramerate /
+ fmax(vpx_configs_[encoder_id].ts_rate_decimator[tid], 1.0);
+ if (tid > 0) {
+      // Layer bitrates and fps are given as cumulative (partial) sums.
+ bitrate_bps -= vpx_configs_[encoder_id].ts_target_bitrate[tid - 1] * 1000;
+ fps = codec_.maxFramerate /
+ fmax(vpx_configs_[encoder_id].ts_rate_decimator[tid - 1], 1.0);
+ }
+ }
+
+ if (fps < 1e-9)
+ return 0;
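+  // E.g. 100 kbps at 10 fps with a 30% undershoot target gives
+  // 100000 / (8 * 10) * 70 / 100 = 875 bytes per frame.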
+ return static_cast<size_t>(
+ bitrate_bps / (8 * fps) *
+ (100 -
+ variable_framerate_experiment_.steady_state_undershoot_percentage) /
+ 100 +
+ 0.5);
+}
+
+bool LibvpxVp8Encoder::UpdateVpxConfiguration(size_t stream_index) {
+ RTC_DCHECK(frame_buffer_controller_);
+
+ const size_t config_index = vpx_configs_.size() - 1 - stream_index;
+
+ RTC_DCHECK_LT(config_index, config_overrides_.size());
+ Vp8EncoderConfig* config = &config_overrides_[config_index];
+
+ const Vp8EncoderConfig new_config =
+ frame_buffer_controller_->UpdateConfiguration(stream_index);
+
+ if (new_config.reset_previous_configuration_overrides) {
+ *config = new_config;
+ return true;
+ }
+
+ const bool changes_made = MaybeExtendVp8EncoderConfig(new_config, config);
+
+ // Note that overrides must be applied even if they haven't changed.
+ RTC_DCHECK_LT(config_index, vpx_configs_.size());
+ vpx_codec_enc_cfg_t* vpx_config = &vpx_configs_[config_index];
+ ApplyVp8EncoderConfigToVpxConfig(*config, vpx_config);
+
+ return changes_made;
+}
+
+int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ RTC_DCHECK_EQ(frame.width(), codec_.width);
+ RTC_DCHECK_EQ(frame.height(), codec_.height);
+
+ if (!inited_)
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ if (encoded_complete_callback_ == NULL)
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+
+ bool key_frame_requested = false;
+ for (size_t i = 0; i < key_frame_request_.size() && i < send_stream_.size();
+ ++i) {
+ if (key_frame_request_[i] && send_stream_[i]) {
+ key_frame_requested = true;
+ break;
+ }
+ }
+ if (!key_frame_requested && frame_types) {
+ for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
+ ++i) {
+ if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
+ send_stream_[i]) {
+ key_frame_requested = true;
+ break;
+ }
+ }
+ }
+
+ if (frame.update_rect().IsEmpty() && num_steady_state_frames_ >= 3 &&
+ !key_frame_requested) {
+ if (variable_framerate_experiment_.enabled &&
+ framerate_controller_.DropFrame(frame.timestamp() / kRtpTicksPerMs)) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ framerate_controller_.AddFrame(frame.timestamp() / kRtpTicksPerMs);
+ }
+
+ bool send_key_frame = key_frame_requested;
+ bool drop_frame = false;
+ bool retransmission_allowed = true;
+ Vp8FrameConfig tl_configs[kMaxSimulcastStreams];
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ tl_configs[i] =
+ frame_buffer_controller_->NextFrameConfig(i, frame.timestamp());
+ send_key_frame |= tl_configs[i].IntraFrame();
+ drop_frame |= tl_configs[i].drop_frame;
+ RTC_DCHECK(i == 0 ||
+ retransmission_allowed == tl_configs[i].retransmission_allowed);
+ retransmission_allowed = tl_configs[i].retransmission_allowed;
+ }
+
+ if (drop_frame && !send_key_frame) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ flags[i] = send_key_frame ? VPX_EFLAG_FORCE_KF : EncodeFlags(tl_configs[i]);
+ }
+
+ // Scale and map buffers and set `raw_images_` to hold pointers to the result.
+ // Because `raw_images_` are set to hold pointers to the prepared buffers, we
+ // need to keep these buffers alive through reference counting until after
+ // encoding is complete.
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers =
+ PrepareBuffers(frame.video_frame_buffer());
+ if (prepared_buffers.empty()) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ struct CleanUpOnExit {
+ explicit CleanUpOnExit(
+ vpx_image_t* raw_image,
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers)
+ : raw_image_(raw_image),
+ prepared_buffers_(std::move(prepared_buffers)) {}
+ ~CleanUpOnExit() {
+ raw_image_->planes[VPX_PLANE_Y] = nullptr;
+ raw_image_->planes[VPX_PLANE_U] = nullptr;
+ raw_image_->planes[VPX_PLANE_V] = nullptr;
+ }
+ vpx_image_t* raw_image_;
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers_;
+ } clean_up_on_exit(&raw_images_[0], std::move(prepared_buffers));
+
+ if (send_key_frame) {
+ // Adapt the size of the key frame when in screenshare with 1 temporal
+ // layer.
+ if (encoders_.size() == 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing &&
+ codec_.VP8()->numberOfTemporalLayers <= 1) {
+ const uint32_t forceKeyFrameIntraTh = 100;
+ libvpx_->codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ forceKeyFrameIntraTh);
+ }
+
+ std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
+ }
+
+ // Set the encoder frame flags and temporal layer_id for each spatial stream.
+ // Note that streams are defined starting from lowest resolution at
+ // position 0 to highest resolution at position |encoders_.size() - 1|,
+ // whereas `encoder_` is from highest to lowest resolution.
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ const size_t stream_idx = encoders_.size() - 1 - i;
+
+ if (UpdateVpxConfiguration(stream_idx)) {
+ if (libvpx_->codec_enc_config_set(&encoders_[i], &vpx_configs_[i]))
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ libvpx_->codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS,
+ static_cast<int>(flags[stream_idx]));
+ libvpx_->codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
+ tl_configs[i].encoder_layer_id);
+ }
+ // TODO(holmer): Ideally the duration should be the timestamp diff of this
+ // frame and the next frame to be encoded, which we don't have. Instead we
+ // would like to use the duration of the previous frame. Unfortunately the
+ // rate control seems to be off with that setup. Using the average input
+ // frame rate to calculate an average duration for now.
+ RTC_DCHECK_GT(codec_.maxFramerate, 0);
+ uint32_t duration = kRtpTicksPerSecond / codec_.maxFramerate;
+
+ int error = WEBRTC_VIDEO_CODEC_OK;
+ int num_tries = 0;
+ // If the first try returns WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT
+ // the frame must be reencoded with the same parameters again because
+ // target bitrate is exceeded and encoder state has been reset.
+ while (num_tries == 0 ||
+ (num_tries == 1 &&
+ error == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT)) {
+ ++num_tries;
+    // Note: we must pass 0 for `flags` in the encode call below, since the
+    // flags have already been set per encoder/spatial layer via the
+    // codec_control() calls above.
+ error = libvpx_->codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
+ duration, 0, VPX_DL_REALTIME);
+ // Reset specific intra frame thresholds, following the key frame.
+ if (send_key_frame) {
+ libvpx_->codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target_);
+ }
+ if (error)
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ // Examines frame timestamps only.
+ error = GetEncodedPartitions(frame, retransmission_allowed);
+ }
+ // TODO(sprang): Shouldn't we use the frame timestamp instead?
+ timestamp_ += duration;
+ return error;
+}
+
+void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ const vpx_codec_cx_pkt_t& pkt,
+ int stream_idx,
+ int encoder_idx,
+ uint32_t timestamp) {
+ RTC_DCHECK(codec_specific);
+ codec_specific->codecType = kVideoCodecVP8;
+ codec_specific->codecSpecific.VP8.keyIdx =
+ kNoKeyIdx; // TODO(hlundin) populate this
+ codec_specific->codecSpecific.VP8.nonReference =
+ (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
+
+ int qp = 0;
+ vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
+ bool is_keyframe = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0;
+ frame_buffer_controller_->OnEncodeDone(stream_idx, timestamp,
+ encoded_images_[encoder_idx].size(),
+ is_keyframe, qp, codec_specific);
+ if (is_keyframe && codec_specific->template_structure != absl::nullopt) {
+    // The number of resolutions must match the number of spatial layers; VP8
+    // structures are expected to use a single spatial layer. Templates must
+    // be ordered by spatial_id, so the assumption that there is exactly one
+    // spatial layer is equivalent to assuming that the last template uses
+    // spatial_id = 0.
+    // This check catches the potential scenario where template_structure is
+    // shared across multiple vp8 streams and the streams are distinguished
+    // using spatial_id. Assigning a single resolution doesn't support such a
+    // scenario, i.e. it assumes vp8 simulcast is sent using multiple ssrcs.
+ RTC_DCHECK(!codec_specific->template_structure->templates.empty());
+ RTC_DCHECK_EQ(
+ codec_specific->template_structure->templates.back().spatial_id, 0);
+ codec_specific->template_structure->resolutions = {
+ RenderResolution(pkt.data.frame.width[0], pkt.data.frame.height[0])};
+ }
+ switch (vpx_configs_[encoder_idx].ts_number_layers) {
+ case 1:
+ codec_specific->scalability_mode = ScalabilityMode::kL1T1;
+ break;
+ case 2:
+ codec_specific->scalability_mode = ScalabilityMode::kL1T2;
+ break;
+ case 3:
+ codec_specific->scalability_mode = ScalabilityMode::kL1T3;
+ break;
+ }
+}
+
+int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
+ bool retransmission_allowed) {
+ int stream_idx = static_cast<int>(encoders_.size()) - 1;
+ int result = WEBRTC_VIDEO_CODEC_OK;
+ for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
+ ++encoder_idx, --stream_idx) {
+ vpx_codec_iter_t iter = NULL;
+ encoded_images_[encoder_idx].set_size(0);
+ encoded_images_[encoder_idx]._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo codec_specific;
+ const vpx_codec_cx_pkt_t* pkt = NULL;
+
+ size_t encoded_size = 0;
+ while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
+ if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ encoded_size += pkt->data.frame.sz;
+ }
+ }
+
+ auto buffer = EncodedImageBuffer::Create(encoded_size);
+
+ iter = NULL;
+ size_t encoded_pos = 0;
+ while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
+ switch (pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT: {
+ RTC_CHECK_LE(encoded_pos + pkt->data.frame.sz, buffer->size());
+ memcpy(&buffer->data()[encoded_pos], pkt->data.frame.buf,
+ pkt->data.frame.sz);
+ encoded_pos += pkt->data.frame.sz;
+ break;
+ }
+ default:
+ break;
+ }
+ // End of frame
+ if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
+ // check if encoded frame is a key frame
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ encoded_images_[encoder_idx]._frameType =
+ VideoFrameType::kVideoFrameKey;
+ }
+ encoded_images_[encoder_idx].SetEncodedData(buffer);
+ encoded_images_[encoder_idx].set_size(encoded_pos);
+ encoded_images_[encoder_idx].SetSpatialIndex(stream_idx);
+ PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, encoder_idx,
+ input_image.timestamp());
+ if (codec_specific.codecSpecific.VP8.temporalIdx != kNoTemporalIdx) {
+ encoded_images_[encoder_idx].SetTemporalIndex(
+ codec_specific.codecSpecific.VP8.temporalIdx);
+ }
+ break;
+ }
+ }
+ encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
+ encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());
+ encoded_images_[encoder_idx].SetRetransmissionAllowed(
+ retransmission_allowed);
+
+ if (send_stream_[stream_idx]) {
+ if (encoded_images_[encoder_idx].size() > 0) {
+ TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
+ encoded_images_[encoder_idx].size());
+ encoded_images_[encoder_idx]._encodedHeight =
+ codec_.simulcastStream[stream_idx].height;
+ encoded_images_[encoder_idx]._encodedWidth =
+ codec_.simulcastStream[stream_idx].width;
+ int qp_128 = -1;
+ libvpx_->codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER,
+ &qp_128);
+ encoded_images_[encoder_idx].qp_ = qp_128;
+ encoded_complete_callback_->OnEncodedImage(encoded_images_[encoder_idx],
+ &codec_specific);
+ const size_t steady_state_size = SteadyStateSize(
+ stream_idx, codec_specific.codecSpecific.VP8.temporalIdx);
+ if (qp_128 > variable_framerate_experiment_.steady_state_qp ||
+ encoded_images_[encoder_idx].size() > steady_state_size) {
+ num_steady_state_frames_ = 0;
+ } else {
+ ++num_steady_state_frames_;
+ }
+ } else if (!frame_buffer_controller_->SupportsEncoderFrameDropping(
+ stream_idx)) {
+ result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
+ if (encoded_images_[encoder_idx].size() == 0) {
+ // Dropped frame that will be re-encoded.
+ frame_buffer_controller_->OnFrameDropped(stream_idx,
+ input_image.timestamp());
+ }
+ }
+ }
+ }
+ return result;
+}
+
+VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "libvpx";
+ info.has_trusted_rate_controller =
+ rate_control_settings_.LibvpxVp8TrustedRateController();
+ info.is_hardware_accelerated = false;
+ info.supports_simulcast = true;
+ if (!resolution_bitrate_limits_.empty()) {
+ info.resolution_bitrate_limits = resolution_bitrate_limits_;
+ }
+ if (encoder_info_override_.requested_resolution_alignment()) {
+ info.requested_resolution_alignment =
+ *encoder_info_override_.requested_resolution_alignment();
+ info.apply_alignment_to_all_simulcast_layers =
+ encoder_info_override_.apply_alignment_to_all_simulcast_layers();
+ }
+ if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
+ info.resolution_bitrate_limits =
+ encoder_info_override_.resolution_bitrate_limits();
+ }
+
+ const bool enable_scaling =
+ num_active_streams_ == 1 &&
+ (vpx_configs_.empty() || vpx_configs_[0].rc_dropframe_thresh > 0) &&
+ codec_.VP8().automaticResizeOn;
+
+ info.scaling_settings = enable_scaling
+ ? VideoEncoder::ScalingSettings(
+ kLowVp8QpThreshold, kHighVp8QpThreshold)
+ : VideoEncoder::ScalingSettings::kOff;
+ if (rate_control_settings_.LibvpxVp8MinPixels()) {
+ info.scaling_settings.min_pixels_per_frame =
+ rate_control_settings_.LibvpxVp8MinPixels().value();
+ }
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+
+ if (inited_) {
+ // `encoder_idx` is libvpx index where 0 is highest resolution.
+ // `si` is simulcast index, where 0 is lowest resolution.
+ for (size_t si = 0, encoder_idx = encoders_.size() - 1;
+ si < encoders_.size(); ++si, --encoder_idx) {
+ info.fps_allocation[si].clear();
+ if ((codec_.numberOfSimulcastStreams > si &&
+ !codec_.simulcastStream[si].active) ||
+ (si == 0 && SimulcastUtility::IsConferenceModeScreenshare(codec_))) {
+        // There are no defined frame rate fractions if the stream is not
+        // active or if ScreenshareLayers is used; leave the vector empty and
+        // continue.
+ continue;
+ }
+ if (vpx_configs_[encoder_idx].ts_number_layers <= 1) {
+ info.fps_allocation[si].push_back(EncoderInfo::kMaxFramerateFraction);
+ } else {
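+        // E.g. ts_rate_decimator = {4, 2, 1} yields fractions of roughly
+        // 1/4, 1/2 and all of kMaxFramerateFraction for the three layers.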
+ for (size_t ti = 0; ti < vpx_configs_[encoder_idx].ts_number_layers;
+ ++ti) {
+ RTC_DCHECK_GT(vpx_configs_[encoder_idx].ts_rate_decimator[ti], 0);
+ info.fps_allocation[si].push_back(rtc::saturated_cast<uint8_t>(
+ EncoderInfo::kMaxFramerateFraction /
+ vpx_configs_[encoder_idx].ts_rate_decimator[ti] +
+ 0.5));
+ }
+ }
+ }
+ }
+
+ return info;
+}
+
+int LibvpxVp8Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void LibvpxVp8Encoder::MaybeUpdatePixelFormat(vpx_img_fmt fmt) {
+ RTC_DCHECK(!raw_images_.empty());
+ if (raw_images_[0].fmt == fmt) {
+ RTC_DCHECK(std::all_of(
+ std::next(raw_images_.begin()), raw_images_.end(),
+ [fmt](const vpx_image_t& raw_img) { return raw_img.fmt == fmt; }))
+ << "Not all raw images had the right format!";
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Updating vp8 encoder pixel format to "
+ << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420");
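+  // Recreate every raw image with the new format, preserving each image's
+  // display dimensions.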
+ for (size_t i = 0; i < raw_images_.size(); ++i) {
+ vpx_image_t& img = raw_images_[i];
+ auto d_w = img.d_w;
+ auto d_h = img.d_h;
+ libvpx_->img_free(&img);
+ // First image is wrapping the input frame, the rest are allocated.
+ if (i == 0) {
+ libvpx_->img_wrap(&img, fmt, d_w, d_h, 1, NULL);
+ } else {
+ libvpx_->img_alloc(&img, fmt, d_w, d_h, kVp832ByteAlign);
+ }
+ }
+}
+
+std::vector<rtc::scoped_refptr<VideoFrameBuffer>>
+LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
+ RTC_DCHECK_EQ(buffer->width(), raw_images_[0].d_w);
+ RTC_DCHECK_EQ(buffer->height(), raw_images_[0].d_h);
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ supported_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ if (buffer->type() != VideoFrameBuffer::Type::kNative) {
+ // `buffer` is already mapped.
+ mapped_buffer = buffer;
+ } else {
+ // Attempt to map to one of the supported formats.
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
+ }
+ if (!mapped_buffer ||
+ (absl::c_find(supported_formats, mapped_buffer->type()) ==
+ supported_formats.end() &&
+ mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
+ // Unknown pixel format or unable to map, convert to I420 and prepare that
+ // buffer instead to ensure Scale() is safe to use.
+ auto converted_buffer = buffer->ToI420();
+ if (!converted_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(buffer->type())
+ << " image to I420. Can't encode frame.";
+ return {};
+ }
+ RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ // Because `buffer` had to be converted, use `converted_buffer` instead...
+ buffer = mapped_buffer = converted_buffer;
+ }
+
+ // Maybe update pixel format.
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ mapped_type = {mapped_buffer->type()};
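+  // `mapped_type` holds only the mapped buffer's format, so scaled native
+  // buffers below are mapped to that same format.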
+ switch (mapped_buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A:
+ MaybeUpdatePixelFormat(VPX_IMG_FMT_I420);
+ break;
+ case VideoFrameBuffer::Type::kNV12:
+ MaybeUpdatePixelFormat(VPX_IMG_FMT_NV12);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ // Prepare `raw_images_` from `mapped_buffer` and, if simulcast, scaled
+ // versions of `buffer`.
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers;
+ SetRawImagePlanes(&raw_images_[0], mapped_buffer.get());
+ prepared_buffers.push_back(mapped_buffer);
+ for (size_t i = 1; i < encoders_.size(); ++i) {
+    // Native buffers should implement optimized scaling and are preferred as
+    // the scaling source. But if the buffer isn't native, it should be
+    // cheaper to scale from the previously prepared buffer, which is smaller
+    // than `buffer`.
+ VideoFrameBuffer* buffer_to_scale =
+ buffer->type() == VideoFrameBuffer::Type::kNative
+ ? buffer.get()
+ : prepared_buffers.back().get();
+
+ auto scaled_buffer =
+ buffer_to_scale->Scale(raw_images_[i].d_w, raw_images_[i].d_h);
+ if (scaled_buffer->type() == VideoFrameBuffer::Type::kNative) {
+ auto mapped_scaled_buffer =
+ scaled_buffer->GetMappedFrameBuffer(mapped_type);
+ RTC_DCHECK(mapped_scaled_buffer) << "Unable to map the scaled buffer.";
+ if (!mapped_scaled_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to map scaled "
+ << VideoFrameBufferTypeToString(scaled_buffer->type())
+ << " image to "
+ << VideoFrameBufferTypeToString(mapped_buffer->type())
+ << ". Can't encode frame.";
+ return {};
+ }
+ scaled_buffer = mapped_scaled_buffer;
+ }
+ if (!IsCompatibleVideoFrameBufferType(scaled_buffer->type(),
+ mapped_buffer->type())) {
+ RTC_LOG(LS_ERROR) << "When scaling "
+ << VideoFrameBufferTypeToString(buffer_to_scale->type())
+ << ", the image was unexpectedly converted to "
+ << VideoFrameBufferTypeToString(scaled_buffer->type())
+ << " instead of "
+ << VideoFrameBufferTypeToString(mapped_buffer->type())
+ << ". Can't encode frame.";
+ RTC_DCHECK_NOTREACHED()
+ << "Scaled buffer type "
+ << VideoFrameBufferTypeToString(scaled_buffer->type())
+ << " is not compatible with mapped buffer type "
+ << VideoFrameBufferTypeToString(mapped_buffer->type());
+ return {};
+ }
+ SetRawImagePlanes(&raw_images_[i], scaled_buffer.get());
+ prepared_buffers.push_back(scaled_buffer);
+ }
+ return prepared_buffers;
+}
+
+// static
+LibvpxVp8Encoder::VariableFramerateExperiment
+LibvpxVp8Encoder::ParseVariableFramerateConfig(std::string group_name) {
+ FieldTrialFlag disabled = FieldTrialFlag("Disabled");
+ FieldTrialParameter<double> framerate_limit("min_fps", 5.0);
+ FieldTrialParameter<int> qp("min_qp", 15);
+ FieldTrialParameter<int> undershoot_percentage("undershoot", 30);
+ ParseFieldTrial({&disabled, &framerate_limit, &qp, &undershoot_percentage},
+ field_trial::FindFullName(group_name));
+ VariableFramerateExperiment config;
+ config.enabled = !disabled.Get();
+ config.framerate_limit = framerate_limit.Get();
+ config.steady_state_qp = qp.Get();
+ config.steady_state_undershoot_percentage = undershoot_percentage.Get();
+
+ return config;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
new file mode 100644
index 0000000000..74477eac7e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_frame_buffer_controller.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+#include "modules/video_coding/utility/vp8_constants.h"
+#include "rtc_base/experiments/cpu_speed_experiment.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+
+namespace webrtc {
+
+class LibvpxVp8Encoder : public VideoEncoder {
+ public:
+ LibvpxVp8Encoder(std::unique_ptr<LibvpxInterface> interface,
+ VP8Encoder::Settings settings);
+ ~LibvpxVp8Encoder() override;
+
+ int Release() override;
+
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+
+ int InitEncode(const VideoCodec* codec_settings,
+ const VideoEncoder::Settings& settings) override;
+
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+
+ void SetRates(const RateControlParameters& parameters) override;
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+
+ void OnRttUpdate(int64_t rtt_ms) override;
+
+ void OnLossNotification(const LossNotification& loss_notification) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ static vpx_enc_frame_flags_t EncodeFlags(const Vp8FrameConfig& references);
+
+ private:
+ // Get the cpu_speed setting for encoder based on resolution and/or platform.
+ int GetCpuSpeed(int width, int height);
+
+ // Determine number of encoder threads to use.
+ int NumberOfThreads(int width, int height, int number_of_cores);
+
+ // Call encoder initialize function and set control settings.
+ int InitAndSetControlSettings();
+
+ void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ const vpx_codec_cx_pkt& pkt,
+ int stream_idx,
+ int encoder_idx,
+ uint32_t timestamp);
+
+ int GetEncodedPartitions(const VideoFrame& input_image,
+ bool retransmission_allowed);
+
+ // Set the stream state for stream `stream_idx`.
+ void SetStreamState(bool send_stream, int stream_idx);
+
+ uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
+
+ uint32_t FrameDropThreshold(size_t spatial_idx) const;
+
+ size_t SteadyStateSize(int sid, int tid);
+
+ bool UpdateVpxConfiguration(size_t stream_index);
+
+ void MaybeUpdatePixelFormat(vpx_img_fmt fmt);
+  // Prepares `raw_images_` to reference image data of `buffer`, or of mapped
+  // or scaled versions of `buffer`. Returns a list of buffers that got
+  // referenced as a result, allowing the caller to keep references to them
+  // until after encoding has finished. On failure to convert the buffer, an
+  // empty list is returned.
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> PrepareBuffers(
+ rtc::scoped_refptr<VideoFrameBuffer> buffer);
+
+ const std::unique_ptr<LibvpxInterface> libvpx_;
+
+ const CpuSpeedExperiment experimental_cpu_speed_config_arm_;
+ const RateControlSettings rate_control_settings_;
+
+ EncodedImageCallback* encoded_complete_callback_ = nullptr;
+ VideoCodec codec_;
+ bool inited_ = false;
+ int64_t timestamp_ = 0;
+ int qp_max_ = 56;
+ int cpu_speed_default_ = -6;
+ int number_of_cores_ = 0;
+ uint32_t rc_max_intra_target_ = 0;
+ int num_active_streams_ = 0;
+ const std::unique_ptr<Vp8FrameBufferControllerFactory>
+ frame_buffer_controller_factory_;
+ std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_;
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>
+ resolution_bitrate_limits_;
+ std::vector<bool> key_frame_request_;
+ std::vector<bool> send_stream_;
+ std::vector<int> cpu_speed_;
+ std::vector<vpx_image_t> raw_images_;
+ std::vector<EncodedImage> encoded_images_;
+ std::vector<vpx_codec_ctx_t> encoders_;
+ std::vector<vpx_codec_enc_cfg_t> vpx_configs_;
+ std::vector<Vp8EncoderConfig> config_overrides_;
+ std::vector<vpx_rational_t> downsampling_factors_;
+
+ // Variable frame-rate screencast related fields and methods.
+ const struct VariableFramerateExperiment {
+ bool enabled = false;
+ // Framerate is limited to this value in steady state.
+ float framerate_limit = 5.0;
+ // This qp or below is considered a steady state.
+ int steady_state_qp = kVp8SteadyStateQpThreshold;
+    // Frames that undershoot the ideal size for the configured bitrate by at
+    // least this percentage are considered to be in a steady state.
+ int steady_state_undershoot_percentage = 30;
+ } variable_framerate_experiment_;
+ static VariableFramerateExperiment ParseVariableFramerateConfig(
+ std::string group_name);
+ FramerateControllerDeprecated framerate_controller_;
+ int num_steady_state_frames_ = 0;
+
+ FecControllerOverride* fec_controller_override_ = nullptr;
+
+ const LibvpxVp8EncoderInfoSettings encoder_info_override_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
new file mode 100644
index 0000000000..4ca3de20d5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/create_simulcast_test_fixture.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
+ std::unique_ptr<VideoEncoderFactory> encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>(
+ []() { return VP8Encoder::Create(); });
+ std::unique_ptr<VideoDecoderFactory> decoder_factory =
+ std::make_unique<FunctionVideoDecoderFactory>(
+ []() { return VP8Decoder::Create(); });
+ return CreateSimulcastTestFixture(std::move(encoder_factory),
+ std::move(decoder_factory),
+ SdpVideoFormat("VP8"));
+}
+} // namespace
+
+TEST(LibvpxVp8SimulcastTest, TestKeyFrameRequestsOnAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestKeyFrameRequestsOnAllStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestKeyFrameRequestsOnSpecificStreams) {
+ GTEST_SKIP() << "Not applicable to VP8.";
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingAllStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingTwoStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingTwoStreamsOneMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingTwoStreamsOneMaxedOut();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestPaddingOneStreamTwoMaxedOut) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestPaddingOneStreamTwoMaxedOut();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSendAllStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSendAllStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestDisablingStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestDisablingStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestActiveStreams) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestActiveStreams();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneOddStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneOddStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneSmallStream) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSwitchingToOneSmallStream();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestSpatioTemporalLayers333PatternEncoder) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestSpatioTemporalLayers333PatternEncoder();
+}
+
+TEST(LibvpxVp8SimulcastTest, TestStrideEncodeDecode) {
+ auto fixture = CreateSpecificSimulcastTestFixture();
+ fixture->TestStrideEncodeDecode();
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
new file mode 100644
index 0000000000..71db0b22c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -0,0 +1,624 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+using BufferFlags = Vp8FrameConfig::BufferFlags;
+
+constexpr BufferFlags kNone = Vp8FrameConfig::BufferFlags::kNone;
+constexpr BufferFlags kReference = Vp8FrameConfig::BufferFlags::kReference;
+constexpr BufferFlags kUpdate = Vp8FrameConfig::BufferFlags::kUpdate;
+constexpr BufferFlags kReferenceAndUpdate =
+ Vp8FrameConfig::BufferFlags::kReferenceAndUpdate;
+
+constexpr int kOneSecond90Khz = 90000;
+constexpr int kMinTimeBetweenSyncs = kOneSecond90Khz * 2;
+constexpr int kMaxTimeBetweenSyncs = kOneSecond90Khz * 4;
+constexpr int kQpDeltaThresholdForSync = 8;
+constexpr int kMinBitrateKbpsForQpBoost = 500;
+constexpr auto kSwitch = DecodeTargetIndication::kSwitch;
+} // namespace
+
+const double ScreenshareLayers::kMaxTL0FpsReduction = 2.5;
+const double ScreenshareLayers::kAcceptableTargetOvershoot = 2.0;
+
+constexpr int ScreenshareLayers::kMaxNumTemporalLayers;
+
+// Always emit a frame at a certain minimum interval, even if bitrate targets
+// have been exceeded. This prevents needless keyframe requests.
+const int ScreenshareLayers::kMaxFrameIntervalMs = 2750;
+
+ScreenshareLayers::ScreenshareLayers(int num_temporal_layers)
+ : number_of_temporal_layers_(
+ std::min(kMaxNumTemporalLayers, num_temporal_layers)),
+ active_layer_(-1),
+ last_timestamp_(-1),
+ last_sync_timestamp_(-1),
+ last_emitted_tl0_timestamp_(-1),
+ last_frame_time_ms_(-1),
+ max_debt_bytes_(0),
+ encode_framerate_(1000.0f, 1000.0f), // 1 second window, second scale.
+ bitrate_updated_(false),
+ checker_(TemporalLayersChecker::CreateTemporalLayersChecker(
+ Vp8TemporalLayersType::kBitrateDynamic,
+ num_temporal_layers)) {
+ RTC_CHECK_GT(number_of_temporal_layers_, 0);
+ RTC_CHECK_LE(number_of_temporal_layers_, kMaxNumTemporalLayers);
+}
+
+ScreenshareLayers::~ScreenshareLayers() {
+ UpdateHistograms();
+}
+
+void ScreenshareLayers::SetQpLimits(size_t stream_index,
+ int min_qp,
+ int max_qp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ // 0 < min_qp <= max_qp
+ RTC_DCHECK_LT(0, min_qp);
+ RTC_DCHECK_LE(min_qp, max_qp);
+
+ RTC_DCHECK_EQ(min_qp_.has_value(), max_qp_.has_value());
+ if (!min_qp_.has_value()) {
+ min_qp_ = min_qp;
+ max_qp_ = max_qp;
+ } else {
+ RTC_DCHECK_EQ(min_qp, min_qp_.value());
+ RTC_DCHECK_EQ(max_qp, max_qp_.value());
+ }
+}
+
+size_t ScreenshareLayers::StreamCount() const {
+ return 1;
+}
+
+bool ScreenshareLayers::SupportsEncoderFrameDropping(
+ size_t stream_index) const {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ // Frame dropping is handled internally by this class.
+ return false;
+}
+
+Vp8FrameConfig ScreenshareLayers::NextFrameConfig(size_t stream_index,
+ uint32_t timestamp) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+
+ auto it = pending_frame_configs_.find(timestamp);
+ if (it != pending_frame_configs_.end()) {
+    // A dropped frame being re-encoded; reuse the previous config.
+ return it->second.frame_config;
+ }
+
+ if (number_of_temporal_layers_ <= 1) {
+ // No flags needed for 1 layer screenshare.
+ // TODO(pbos): Consider updating only last, and not all buffers.
+ DependencyInfo dependency_info{
+ "S", {kReferenceAndUpdate, kReferenceAndUpdate, kReferenceAndUpdate}};
+ pending_frame_configs_[timestamp] = dependency_info;
+ return dependency_info.frame_config;
+ }
+
+ const int64_t now_ms = rtc::TimeMillis();
+
+ int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
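+  // Unwrap the 32-bit RTP timestamp so the interval arithmetic below
+  // survives timestamp wraparound.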
+ int64_t ts_diff;
+ if (last_timestamp_ == -1) {
+ ts_diff = kOneSecond90Khz / capture_framerate_.value_or(*target_framerate_);
+ } else {
+ ts_diff = unwrapped_timestamp - last_timestamp_;
+ }
+
+ if (target_framerate_) {
+    // Drop the frame if the input frame rate exceeds the target frame rate
+    // over a one second averaging window, or if the frame interval is below
+    // 85% of the desired value.
+ if (encode_framerate_.Rate(now_ms).value_or(0) > *target_framerate_)
+ return Vp8FrameConfig(kNone, kNone, kNone);
+
+    // Primarily check if the frame interval is too short using frame
+    // timestamps; if they are correct, they aren't affected by queuing
+    // inside WebRTC.
+ const int64_t expected_frame_interval_90khz =
+ kOneSecond90Khz / *target_framerate_;
+ if (last_timestamp_ != -1 && ts_diff > 0) {
+ if (ts_diff < 85 * expected_frame_interval_90khz / 100) {
+ return Vp8FrameConfig(kNone, kNone, kNone);
+ }
+ } else {
+      // Timestamps look off; use the realtime clock here instead.
+ const int64_t expected_frame_interval_ms = 1000 / *target_framerate_;
+ if (last_frame_time_ms_ != -1 &&
+ now_ms - last_frame_time_ms_ <
+ (85 * expected_frame_interval_ms) / 100) {
+ return Vp8FrameConfig(kNone, kNone, kNone);
+ }
+ }
+ }
+
+ if (stats_.first_frame_time_ms_ == -1)
+ stats_.first_frame_time_ms_ = now_ms;
+
+ // Make sure both frame droppers leak out bits.
+ layers_[0].UpdateDebt(ts_diff / 90);
+ layers_[1].UpdateDebt(ts_diff / 90);
+ last_timestamp_ = timestamp;
+ last_frame_time_ms_ = now_ms;
+
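+  // Select which layer to encode based on accumulated byte debt: prefer TL0,
+  // fall back to TL1 if TL0 is over budget, and drop if both layers are.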
+ TemporalLayerState layer_state = TemporalLayerState::kDrop;
+
+ if (active_layer_ == -1 ||
+ layers_[active_layer_].state != TemporalLayer::State::kDropped) {
+ if (last_emitted_tl0_timestamp_ != -1 &&
+ (unwrapped_timestamp - last_emitted_tl0_timestamp_) / 90 >
+ kMaxFrameIntervalMs) {
+      // Too much time has passed since the last frame was emitted; cancel
+      // enough debt to allow a single frame.
+ layers_[0].debt_bytes_ = max_debt_bytes_ - 1;
+ }
+ if (layers_[0].debt_bytes_ > max_debt_bytes_) {
+ // Must drop TL0, encode TL1 instead.
+ if (layers_[1].debt_bytes_ > max_debt_bytes_) {
+ // Must drop both TL0 and TL1.
+ active_layer_ = -1;
+ } else {
+ active_layer_ = 1;
+ }
+ } else {
+ active_layer_ = 0;
+ }
+ }
+
+ switch (active_layer_) {
+ case 0:
+ layer_state = TemporalLayerState::kTl0;
+ last_emitted_tl0_timestamp_ = unwrapped_timestamp;
+ break;
+ case 1:
+ if (layers_[1].state != TemporalLayer::State::kDropped) {
+ if (TimeToSync(unwrapped_timestamp) ||
+ layers_[1].state == TemporalLayer::State::kKeyFrame) {
+ last_sync_timestamp_ = unwrapped_timestamp;
+ layer_state = TemporalLayerState::kTl1Sync;
+ } else {
+ layer_state = TemporalLayerState::kTl1;
+ }
+ } else {
+ layer_state = last_sync_timestamp_ == unwrapped_timestamp
+ ? TemporalLayerState::kTl1Sync
+ : TemporalLayerState::kTl1;
+ }
+ break;
+ case -1:
+ layer_state = TemporalLayerState::kDrop;
+ ++stats_.num_dropped_frames_;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ DependencyInfo dependency_info;
+ // TODO(pbos): Consider referencing but not updating the 'alt' buffer for all
+ // layers.
+ switch (layer_state) {
+ case TemporalLayerState::kDrop:
+ dependency_info = {"", {kNone, kNone, kNone}};
+ break;
+ case TemporalLayerState::kTl0:
+ // TL0 only references and updates 'last'.
+ dependency_info = {"SS", {kReferenceAndUpdate, kNone, kNone}};
+ dependency_info.frame_config.packetizer_temporal_idx = 0;
+ break;
+ case TemporalLayerState::kTl1:
+ // TL1 references both 'last' and 'golden' but only updates 'golden'.
+ dependency_info = {"-R", {kReference, kReferenceAndUpdate, kNone}};
+ dependency_info.frame_config.packetizer_temporal_idx = 1;
+ break;
+ case TemporalLayerState::kTl1Sync:
+ // Predict from only TL0 to allow participants to switch to the high
+ // bitrate stream. Updates 'golden' so that TL1 can continue to refer to
+ // and update 'golden' from this point on.
+ dependency_info = {"-S", {kReference, kUpdate, kNone}};
+ dependency_info.frame_config.packetizer_temporal_idx = 1;
+ dependency_info.frame_config.layer_sync = true;
+ break;
+ }
+
+ pending_frame_configs_[timestamp] = dependency_info;
+ return dependency_info.frame_config;
+}
+
+void ScreenshareLayers::OnRatesUpdated(
+ size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK_GT(framerate_fps, 0);
+ RTC_DCHECK_GE(bitrates_bps.size(), 1);
+ RTC_DCHECK_LE(bitrates_bps.size(), 2);
+
+ // `bitrates_bps` uses individual rates per layer, but we want to use the
+ // accumulated rate here.
+ uint32_t tl0_kbps = bitrates_bps[0] / 1000;
+ uint32_t tl1_kbps = tl0_kbps;
+ if (bitrates_bps.size() > 1) {
+ tl1_kbps += bitrates_bps[1] / 1000;
+ }
+
+ if (!target_framerate_) {
+ // First OnRatesUpdated() is called during construction, with the
+ // configured targets as parameters.
+ target_framerate_ = framerate_fps;
+ capture_framerate_ = target_framerate_;
+ bitrate_updated_ = true;
+ } else {
+ if ((capture_framerate_ &&
+ framerate_fps != static_cast<int>(*capture_framerate_)) ||
+ (tl0_kbps != layers_[0].target_rate_kbps_) ||
+ (tl1_kbps != layers_[1].target_rate_kbps_)) {
+ bitrate_updated_ = true;
+ }
+
+ if (framerate_fps < 0) {
+ capture_framerate_.reset();
+ } else {
+ capture_framerate_ = framerate_fps;
+ }
+ }
+
+ layers_[0].target_rate_kbps_ = tl0_kbps;
+ layers_[1].target_rate_kbps_ = tl1_kbps;
+}
+
+void ScreenshareLayers::OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+
+ if (size_bytes == 0) {
+ RTC_LOG(LS_WARNING) << "Empty frame; treating as dropped.";
+ OnFrameDropped(stream_index, rtp_timestamp);
+ return;
+ }
+
+ absl::optional<DependencyInfo> dependency_info;
+ auto it = pending_frame_configs_.find(rtp_timestamp);
+ if (it != pending_frame_configs_.end()) {
+ dependency_info = it->second;
+ pending_frame_configs_.erase(it);
+
+ if (checker_) {
+ RTC_DCHECK(checker_->CheckTemporalConfig(is_keyframe,
+ dependency_info->frame_config));
+ }
+ }
+
+ CodecSpecificInfoVP8& vp8_info = info->codecSpecific.VP8;
+ GenericFrameInfo& generic_frame_info = info->generic_frame_info.emplace();
+
+ if (number_of_temporal_layers_ == 1) {
+ vp8_info.temporalIdx = kNoTemporalIdx;
+ vp8_info.layerSync = false;
+ generic_frame_info.temporal_id = 0;
+ generic_frame_info.decode_target_indications = {kSwitch};
+ generic_frame_info.encoder_buffers.emplace_back(
+ 0, /*referenced=*/!is_keyframe, /*updated=*/true);
+ } else {
+ int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(rtp_timestamp);
+ if (dependency_info) {
+ vp8_info.temporalIdx =
+ dependency_info->frame_config.packetizer_temporal_idx;
+ vp8_info.layerSync = dependency_info->frame_config.layer_sync;
+ generic_frame_info.temporal_id = vp8_info.temporalIdx;
+ generic_frame_info.decode_target_indications =
+ dependency_info->decode_target_indications;
+ } else {
+ RTC_DCHECK(is_keyframe);
+ }
+
+ if (is_keyframe) {
+ vp8_info.temporalIdx = 0;
+ last_sync_timestamp_ = unwrapped_timestamp;
+ vp8_info.layerSync = true;
+ layers_[0].state = TemporalLayer::State::kKeyFrame;
+ layers_[1].state = TemporalLayer::State::kKeyFrame;
+ active_layer_ = 1;
+ info->template_structure =
+ GetTemplateStructure(number_of_temporal_layers_);
+ generic_frame_info.temporal_id = vp8_info.temporalIdx;
+ generic_frame_info.decode_target_indications = {kSwitch, kSwitch};
+ } else if (active_layer_ >= 0 && layers_[active_layer_].state ==
+ TemporalLayer::State::kKeyFrame) {
+ layers_[active_layer_].state = TemporalLayer::State::kNormal;
+ }
+
+ vp8_info.useExplicitDependencies = true;
+ RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
+ RTC_DCHECK_EQ(vp8_info.updatedBuffersCount, 0u);
+
+    // Note that `frame_config` is not dereferenced if `is_keyframe` is true,
+    // so the optional is never dereferenced while it may be unset.
+ for (int i = 0; i < static_cast<int>(Vp8FrameConfig::Buffer::kCount); ++i) {
+ bool references = false;
+ bool updates = is_keyframe;
+ if (!is_keyframe && dependency_info->frame_config.References(
+ static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.referencedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::referencedBuffers));
+ references = true;
+ vp8_info.referencedBuffers[vp8_info.referencedBuffersCount++] = i;
+ }
+
+ if (is_keyframe || dependency_info->frame_config.Updates(
+ static_cast<Vp8FrameConfig::Buffer>(i))) {
+ RTC_DCHECK_LT(vp8_info.updatedBuffersCount,
+ arraysize(CodecSpecificInfoVP8::updatedBuffers));
+ updates = true;
+ vp8_info.updatedBuffers[vp8_info.updatedBuffersCount++] = i;
+ }
+
+ if (references || updates)
+ generic_frame_info.encoder_buffers.emplace_back(i, references, updates);
+ }
+ }
+
+ encode_framerate_.Update(1, rtc::TimeMillis());
+
+ if (number_of_temporal_layers_ == 1)
+ return;
+
+ RTC_DCHECK_NE(-1, active_layer_);
+ if (layers_[active_layer_].state == TemporalLayer::State::kDropped) {
+ layers_[active_layer_].state = TemporalLayer::State::kQualityBoost;
+ }
+
+ if (qp != -1)
+ layers_[active_layer_].last_qp = qp;
+
+ if (active_layer_ == 0) {
+ layers_[0].debt_bytes_ += size_bytes;
+ layers_[1].debt_bytes_ += size_bytes;
+ ++stats_.num_tl0_frames_;
+ stats_.tl0_target_bitrate_sum_ += layers_[0].target_rate_kbps_;
+ stats_.tl0_qp_sum_ += qp;
+ } else if (active_layer_ == 1) {
+ layers_[1].debt_bytes_ += size_bytes;
+ ++stats_.num_tl1_frames_;
+ stats_.tl1_target_bitrate_sum_ += layers_[1].target_rate_kbps_;
+ stats_.tl1_qp_sum_ += qp;
+ }
+}
+
+void ScreenshareLayers::OnFrameDropped(size_t stream_index,
+ uint32_t rtp_timestamp) {
+ layers_[active_layer_].state = TemporalLayer::State::kDropped;
+ ++stats_.num_overshoots_;
+}
+
+void ScreenshareLayers::OnPacketLossRateUpdate(float packet_loss_rate) {}
+
+void ScreenshareLayers::OnRttUpdate(int64_t rtt_ms) {}
+
+void ScreenshareLayers::OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) {}
+
+FrameDependencyStructure ScreenshareLayers::GetTemplateStructure(
+ int num_layers) const {
+ RTC_CHECK_LT(num_layers, 3);
+ RTC_CHECK_GT(num_layers, 0);
+
+ FrameDependencyStructure template_structure;
+ template_structure.num_decode_targets = num_layers;
+
+ switch (num_layers) {
+ case 1: {
+ template_structure.templates.resize(2);
+ template_structure.templates[0].T(0).Dtis("S");
+ template_structure.templates[1].T(0).Dtis("S").FrameDiffs({1});
+ return template_structure;
+ }
+ case 2: {
+ template_structure.templates.resize(3);
+ template_structure.templates[0].T(0).Dtis("SS");
+ template_structure.templates[1].T(0).Dtis("SS").FrameDiffs({1});
+ template_structure.templates[2].T(1).Dtis("-S").FrameDiffs({1});
+ return template_structure;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ // To make the compiler happy!
+ return template_structure;
+ }
+}
+
+bool ScreenshareLayers::TimeToSync(int64_t timestamp) const {
+ RTC_DCHECK_EQ(1, active_layer_);
+ RTC_DCHECK_NE(-1, layers_[0].last_qp);
+ if (layers_[1].last_qp == -1) {
+ // First frame in TL1 should only depend on TL0 since there are no
+ // previous frames in TL1.
+ return true;
+ }
+
+ RTC_DCHECK_NE(-1, last_sync_timestamp_);
+ int64_t timestamp_diff = timestamp - last_sync_timestamp_;
+ if (timestamp_diff > kMaxTimeBetweenSyncs) {
+ // After a certain time, force a sync frame.
+ return true;
+ } else if (timestamp_diff < kMinTimeBetweenSyncs) {
+ // If too soon from previous sync frame, don't issue a new one.
+ return false;
+ }
+ // Issue a sync frame if difference in quality between TL0 and TL1 isn't too
+ // large.
+ if (layers_[0].last_qp - layers_[1].last_qp < kQpDeltaThresholdForSync)
+ return true;
+ return false;
+}
+
+uint32_t ScreenshareLayers::GetCodecTargetBitrateKbps() const {
+ uint32_t target_bitrate_kbps = layers_[0].target_rate_kbps_;
+
+ if (number_of_temporal_layers_ > 1) {
+ // Calculate a codec target bitrate. This may be higher than TL0, gaining
+ // quality at the expense of frame rate at TL0. Constraints:
+ // - TL0 frame rate no less than framerate / kMaxTL0FpsReduction.
+ // - Target rate * kAcceptableTargetOvershoot should not exceed TL1 rate.
+ target_bitrate_kbps =
+ std::min(layers_[0].target_rate_kbps_ * kMaxTL0FpsReduction,
+ layers_[1].target_rate_kbps_ / kAcceptableTargetOvershoot);
+ }
+
+ return std::max(layers_[0].target_rate_kbps_, target_bitrate_kbps);
+}
+
+Vp8EncoderConfig ScreenshareLayers::UpdateConfiguration(size_t stream_index) {
+ RTC_DCHECK_LT(stream_index, StreamCount());
+ RTC_DCHECK(min_qp_.has_value());
+ RTC_DCHECK(max_qp_.has_value());
+
+ const uint32_t target_bitrate_kbps = GetCodecTargetBitrateKbps();
+
+ // TODO(sprang): We _really_ need to make an overhaul of this class. :(
+ // If we're dropping frames in order to meet a target framerate, adjust the
+ // bitrate assigned to the encoder so the total average bitrate is correct.
+ float encoder_config_bitrate_kbps = target_bitrate_kbps;
+ if (target_framerate_ && capture_framerate_ &&
+ *target_framerate_ < *capture_framerate_) {
+ encoder_config_bitrate_kbps *=
+ static_cast<float>(*capture_framerate_) / *target_framerate_;
+ }
+
+ if (bitrate_updated_ ||
+ encoder_config_.rc_target_bitrate !=
+ absl::make_optional(encoder_config_bitrate_kbps)) {
+ encoder_config_.rc_target_bitrate = encoder_config_bitrate_kbps;
+
+ // Don't reconfigure qp limits during quality boost frames.
+ if (active_layer_ == -1 ||
+ layers_[active_layer_].state != TemporalLayer::State::kQualityBoost) {
+ const int min_qp = min_qp_.value();
+ const int max_qp = max_qp_.value();
+
+ // After a dropped frame, a frame with max qp will be encoded and the
+ // quality will then ramp up from there. To boost the speed of recovery,
+ // encode the next frame with lower max qp, if there is sufficient
+ // bandwidth to do so without causing excessive delay.
+ // TL0 is the most important to improve since the errors in this layer
+ // will propagate to TL1.
+ // Currently, reduce max qp by 20% for TL0 and 15% for TL1.
+ if (layers_[1].target_rate_kbps_ >= kMinBitrateKbpsForQpBoost) {
+ layers_[0].enhanced_max_qp = min_qp + (((max_qp - min_qp) * 80) / 100);
+ layers_[1].enhanced_max_qp = min_qp + (((max_qp - min_qp) * 85) / 100);
+ } else {
+ layers_[0].enhanced_max_qp = -1;
+ layers_[1].enhanced_max_qp = -1;
+ }
+ }
+
+ if (capture_framerate_) {
+ int avg_frame_size =
+ (target_bitrate_kbps * 1000) / (8 * *capture_framerate_);
+ // Allow max debt to be the size of a single optimal frame.
+ // TODO(sprang): Determine if this needs to be adjusted by some factor.
+ // (Lower values may cause more frame drops, higher may lead to queuing
+ // delays.)
+ max_debt_bytes_ = avg_frame_size;
+ }
+
+ bitrate_updated_ = false;
+ }
+
+ // Don't try to update boosts state if not active yet.
+ if (active_layer_ == -1)
+ return encoder_config_;
+
+ if (number_of_temporal_layers_ <= 1)
+ return encoder_config_;
+
+ // If layer is in the quality boost state (following a dropped frame), update
+ // the configuration with the adjusted (lower) qp and set the state back to
+ // normal.
+ unsigned int adjusted_max_qp = max_qp_.value(); // Set the normal max qp.
+ if (layers_[active_layer_].state == TemporalLayer::State::kQualityBoost) {
+ if (layers_[active_layer_].enhanced_max_qp != -1) {
+ // Bitrate is high enough for quality boost, update max qp.
+ adjusted_max_qp = layers_[active_layer_].enhanced_max_qp;
+ }
+ // Regardless of qp, reset the boost state for the next frame.
+ layers_[active_layer_].state = TemporalLayer::State::kNormal;
+ }
+ encoder_config_.rc_max_quantizer = adjusted_max_qp;
+
+ return encoder_config_;
+}
+
+void ScreenshareLayers::TemporalLayer::UpdateDebt(int64_t delta_ms) {
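+  // Leaky-bucket drain: pay down byte debt at the layer's target rate
+  // (kbps * ms / 8 = bytes), clamping at zero.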
+ uint32_t debt_reduction_bytes = target_rate_kbps_ * delta_ms / 8;
+ if (debt_reduction_bytes >= debt_bytes_) {
+ debt_bytes_ = 0;
+ } else {
+ debt_bytes_ -= debt_reduction_bytes;
+ }
+}
+
+void ScreenshareLayers::UpdateHistograms() {
+ if (stats_.first_frame_time_ms_ == -1)
+ return;
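+  // Round the elapsed run time to the nearest whole second.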
+ int64_t duration_sec =
+ (rtc::TimeMillis() - stats_.first_frame_time_ms_ + 500) / 1000;
+ if (duration_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer0.FrameRate",
+ (stats_.num_tl0_frames_ + (duration_sec / 2)) / duration_sec);
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer1.FrameRate",
+ (stats_.num_tl1_frames_ + (duration_sec / 2)) / duration_sec);
+ int total_frames = stats_.num_tl0_frames_ + stats_.num_tl1_frames_;
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.FramesPerDrop",
+ (stats_.num_dropped_frames_ == 0
+ ? 0
+ : total_frames / stats_.num_dropped_frames_));
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.FramesPerOvershoot",
+ (stats_.num_overshoots_ == 0 ? 0
+ : total_frames / stats_.num_overshoots_));
+ if (stats_.num_tl0_frames_ > 0) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.Screenshare.Layer0.Qp",
+ stats_.tl0_qp_sum_ / stats_.num_tl0_frames_);
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer0.TargetBitrate",
+ stats_.tl0_target_bitrate_sum_ / stats_.num_tl0_frames_);
+ }
+ if (stats_.num_tl1_frames_ > 0) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.Screenshare.Layer1.Qp",
+ stats_.tl1_qp_sum_ / stats_.num_tl1_frames_);
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.Screenshare.Layer1.TargetBitrate",
+ stats_.tl1_target_bitrate_sum_ / stats_.num_tl1_frames_);
+ }
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
new file mode 100644
index 0000000000..47d6b401f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
+
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "modules/video_coding/codecs/vp8/include/temporal_layers_checker.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/frame_dropper.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/rate_statistics.h"
+
+namespace webrtc {
+
+struct CodecSpecificInfoVP8;
+class Clock;
+
+class ScreenshareLayers final : public Vp8FrameBufferController {
+ public:
+ static const double kMaxTL0FpsReduction;
+ static const double kAcceptableTargetOvershoot;
+ static const int kMaxFrameIntervalMs;
+
+ explicit ScreenshareLayers(int num_temporal_layers);
+ ~ScreenshareLayers() override;
+
+ void SetQpLimits(size_t stream_index, int min_qp, int max_qp) override;
+
+ size_t StreamCount() const override;
+
+ bool SupportsEncoderFrameDropping(size_t stream_index) const override;
+
+  // Returns the recommended VP8 encode flags. May refresh the decoder
+ // and/or update the reference buffers.
+ Vp8FrameConfig NextFrameConfig(size_t stream_index,
+ uint32_t rtp_timestamp) override;
+
+ // New target bitrate, per temporal layer.
+ void OnRatesUpdated(size_t stream_index,
+ const std::vector<uint32_t>& bitrates_bps,
+ int framerate_fps) override;
+
+ Vp8EncoderConfig UpdateConfiguration(size_t stream_index) override;
+
+ void OnEncodeDone(size_t stream_index,
+ uint32_t rtp_timestamp,
+ size_t size_bytes,
+ bool is_keyframe,
+ int qp,
+ CodecSpecificInfo* info) override;
+
+ void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) override;
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+
+ void OnRttUpdate(int64_t rtt_ms) override;
+
+ void OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) override;
+
+ private:
+ enum class TemporalLayerState : int { kDrop, kTl0, kTl1, kTl1Sync };
+
+ struct DependencyInfo {
+ DependencyInfo() = default;
+ DependencyInfo(absl::string_view indication_symbols,
+ Vp8FrameConfig frame_config)
+ : decode_target_indications(
+ webrtc_impl::StringToDecodeTargetIndications(indication_symbols)),
+ frame_config(frame_config) {}
+
+ absl::InlinedVector<DecodeTargetIndication, 10> decode_target_indications;
+ Vp8FrameConfig frame_config;
+ };
+
+ bool TimeToSync(int64_t timestamp) const;
+ uint32_t GetCodecTargetBitrateKbps() const;
+
+ const int number_of_temporal_layers_;
+
+  // TODO(eladalon/sprang): These should be made into const ints set in the
+  // ctor.
+ absl::optional<int> min_qp_;
+ absl::optional<int> max_qp_;
+
+ int active_layer_;
+ int64_t last_timestamp_;
+ int64_t last_sync_timestamp_;
+ int64_t last_emitted_tl0_timestamp_;
+ int64_t last_frame_time_ms_;
+ RtpTimestampUnwrapper time_wrap_handler_;
+ uint32_t max_debt_bytes_;
+
+ std::map<uint32_t, DependencyInfo> pending_frame_configs_;
+
+ // Configured max framerate.
+ absl::optional<uint32_t> target_framerate_;
+ // Incoming framerate from capturer.
+ absl::optional<uint32_t> capture_framerate_;
+
+  // Tracks the framerate we actually encode at; used to drop frames on
+  // overshoot.
+ RateStatistics encode_framerate_;
+ bool bitrate_updated_;
+
+ static constexpr int kMaxNumTemporalLayers = 2;
+ struct TemporalLayer {
+ TemporalLayer()
+ : state(State::kNormal),
+ enhanced_max_qp(-1),
+ last_qp(-1),
+ debt_bytes_(0),
+ target_rate_kbps_(0) {}
+
+ enum class State {
+ kNormal,
+ kDropped,
+ kReencoded,
+ kQualityBoost,
+ kKeyFrame
+ } state;
+
+ int enhanced_max_qp;
+ int last_qp;
+ uint32_t debt_bytes_;
+ uint32_t target_rate_kbps_;
+
+ void UpdateDebt(int64_t delta_ms);
+ } layers_[kMaxNumTemporalLayers];
+
+ void UpdateHistograms();
+ FrameDependencyStructure GetTemplateStructure(int num_layers) const;
+
+ // Data for histogram statistics.
+ struct Stats {
+ int64_t first_frame_time_ms_ = -1;
+ int64_t num_tl0_frames_ = 0;
+ int64_t num_tl1_frames_ = 0;
+ int64_t num_dropped_frames_ = 0;
+ int64_t num_overshoots_ = 0;
+ int64_t tl0_qp_sum_ = 0;
+ int64_t tl1_qp_sum_ = 0;
+ int64_t tl0_target_bitrate_sum_ = 0;
+ int64_t tl1_target_bitrate_sum_ = 0;
+ } stats_;
+
+ Vp8EncoderConfig encoder_config_;
+
+ // Optional utility used to verify reference validity.
+ std::unique_ptr<TemporalLayersChecker> checker_;
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_SCREENSHARE_LAYERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
new file mode 100644
index 0000000000..e5b3bd4fdf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -0,0 +1,788 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/vp8_frame_config.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fake_clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "vpx/vp8cx.h"
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::NiceMock;
+
+namespace webrtc {
+namespace {
+// 5 frames per second at 90 kHz.
+const uint32_t kTimestampDelta5Fps = 90000 / 5;
+const int kDefaultQp = 54;
+const int kDefaultTl0BitrateKbps = 200;
+const int kDefaultTl1BitrateKbps = 2000;
+const int kFrameRate = 5;
+const int kSyncPeriodSeconds = 2;
+const int kMaxSyncPeriodSeconds = 4;
+
+// Expected flags for corresponding temporal layers.
+const int kTl0Flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF;
+const int kTl1Flags =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
+const int kTl1SyncFlags = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
+const std::vector<uint32_t> kDefault2TlBitratesBps = {
+ kDefaultTl0BitrateKbps * 1000,
+ (kDefaultTl1BitrateKbps - kDefaultTl0BitrateKbps) * 1000};
+
+} // namespace
+
+class ScreenshareLayerTest : public ::testing::Test {
+ protected:
+ ScreenshareLayerTest()
+ : min_qp_(2),
+ max_qp_(kDefaultQp),
+ frame_size_(-1),
+ timestamp_(90),
+ config_updated_(false) {}
+ virtual ~ScreenshareLayerTest() {}
+
+ void SetUp() override {
+ layers_.reset(new ScreenshareLayers(2));
+ cfg_ = ConfigureBitrates();
+ }
+
+ int EncodeFrame(bool base_sync, CodecSpecificInfo* info = nullptr) {
+ CodecSpecificInfo ignored_info;
+ if (!info) {
+ info = &ignored_info;
+ }
+
+ int flags = ConfigureFrame(base_sync);
+ if (flags != -1)
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, base_sync, kDefaultQp,
+ info);
+ return flags;
+ }
+
+ int ConfigureFrame(bool key_frame) {
+ tl_config_ = NextFrameConfig(0, timestamp_);
+ EXPECT_EQ(0, tl_config_.encoder_layer_id)
+ << "ScreenshareLayers always encodes using the bitrate allocator for "
+ "layer 0, but may reference different buffers and packetize "
+ "differently.";
+ if (tl_config_.drop_frame) {
+ return -1;
+ }
+ const uint32_t prev_rc_target_bitrate = cfg_.rc_target_bitrate.value_or(-1);
+ const uint32_t prev_rc_max_quantizer = cfg_.rc_max_quantizer.value_or(-1);
+
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ config_updated_ =
+ cfg_.temporal_layer_config.has_value() ||
+ (cfg_.rc_target_bitrate.has_value() &&
+ cfg_.rc_target_bitrate.value() != prev_rc_target_bitrate) ||
+ (cfg_.rc_max_quantizer.has_value() &&
+ cfg_.rc_max_quantizer.value() != prev_rc_max_quantizer) ||
+ cfg_.g_error_resilient.has_value();
+
+ int flags = LibvpxVp8Encoder::EncodeFlags(tl_config_);
+ EXPECT_NE(-1, frame_size_);
+ return flags;
+ }
+
+ Vp8FrameConfig NextFrameConfig(size_t stream_index, uint32_t timestamp) {
+ int64_t timestamp_ms = timestamp / 90;
+ clock_.AdvanceTime(TimeDelta::Millis(timestamp_ms - rtc::TimeMillis()));
+ return layers_->NextFrameConfig(stream_index, timestamp);
+ }
+
+ int FrameSizeForBitrate(int bitrate_kbps) {
+ return ((bitrate_kbps * 1000) / 8) / kFrameRate;
+ }
+
+ Vp8EncoderConfig ConfigureBitrates() {
+ layers_->SetQpLimits(0, min_qp_, max_qp_);
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, kFrameRate);
+ const Vp8EncoderConfig vp8_cfg = layers_->UpdateConfiguration(0);
+ EXPECT_TRUE(vp8_cfg.rc_target_bitrate.has_value());
+ frame_size_ = FrameSizeForBitrate(vp8_cfg.rc_target_bitrate.value());
+ return vp8_cfg;
+ }
+
+ void WithQpLimits(int min_qp, int max_qp) {
+ min_qp_ = min_qp;
+ max_qp_ = max_qp;
+ }
+
+ // Runs a few initial frames and makes sure we have seen frames on both
+ // temporal layers, including sync and non-sync frames.
+ bool RunGracePeriod() {
+ bool got_tl0 = false;
+ bool got_tl1 = false;
+ bool got_tl1_sync = false;
+ for (int i = 0; i < 10; ++i) {
+ CodecSpecificInfo info;
+ EXPECT_NE(-1, EncodeFrame(false, &info));
+ timestamp_ += kTimestampDelta5Fps;
+ if (info.codecSpecific.VP8.temporalIdx == 0) {
+ got_tl0 = true;
+ } else if (info.codecSpecific.VP8.layerSync) {
+ got_tl1_sync = true;
+ } else {
+ got_tl1 = true;
+ }
+ if (got_tl0 && got_tl1 && got_tl1_sync)
+ return true;
+ }
+ return false;
+ }
+
+ // Adds frames until we get one in the specified temporal layer. The last
+ // FrameEncoded() call will be omitted and needs to be done by the caller.
+ // Returns the flags for the last frame.
+ int SkipUntilTl(int layer) {
+ return SkipUntilTlAndSync(layer, absl::nullopt);
+ }
+
+ // Same as SkipUntilTl, but also waits until the sync bit condition is met.
+ int SkipUntilTlAndSync(int layer, absl::optional<bool> sync) {
+ int flags = 0;
+ const int kMaxFramesToSkip =
+ 1 + (sync.value_or(false) ? kMaxSyncPeriodSeconds : 1) * kFrameRate;
+ for (int i = 0; i < kMaxFramesToSkip; ++i) {
+ flags = ConfigureFrame(false);
+ if (tl_config_.packetizer_temporal_idx != layer ||
+ (sync && *sync != tl_config_.layer_sync)) {
+ if (flags != -1) {
+ // If flags do not request a frame drop, report some default values
+ // for frame size etc.
+ CodecSpecificInfo info;
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &info);
+ }
+ timestamp_ += kTimestampDelta5Fps;
+ } else {
+        // Found a frame from the sought-after layer.
+ return flags;
+ }
+ }
+ ADD_FAILURE() << "Did not get a frame of TL" << layer << " in time.";
+ return -1;
+ }
+
+ int min_qp_;
+ uint32_t max_qp_;
+ int frame_size_;
+ rtc::ScopedFakeClock clock_;
+ std::unique_ptr<ScreenshareLayers> layers_;
+
+ uint32_t timestamp_;
+ Vp8FrameConfig tl_config_;
+ Vp8EncoderConfig cfg_;
+ bool config_updated_;
+
+ CodecSpecificInfo* IgnoredCodecSpecificInfo() {
+ ignored_codec_specific_info_ = std::make_unique<CodecSpecificInfo>();
+ return ignored_codec_specific_info_.get();
+ }
+
+ private:
+ std::unique_ptr<CodecSpecificInfo> ignored_codec_specific_info_;
+};
+
+TEST_F(ScreenshareLayerTest, 1Layer) {
+ layers_.reset(new ScreenshareLayers(1));
+ ConfigureBitrates();
+ // One layer screenshare should not use the frame dropper as all frames will
+ // belong to the base layer.
+ const int kSingleLayerFlags = 0;
+ auto info = std::make_unique<CodecSpecificInfo>();
+ int flags = EncodeFrame(/*base_sync=*/false, info.get());
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx),
+ info->codecSpecific.VP8.temporalIdx);
+ EXPECT_FALSE(info->codecSpecific.VP8.layerSync);
+ EXPECT_EQ(info->generic_frame_info->temporal_id, 0);
+
+ info = std::make_unique<CodecSpecificInfo>();
+ flags = EncodeFrame(/*base_sync=*/false, info.get());
+ EXPECT_EQ(kSingleLayerFlags, flags);
+ EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx),
+ info->codecSpecific.VP8.temporalIdx);
+ EXPECT_FALSE(info->codecSpecific.VP8.layerSync);
+ EXPECT_EQ(info->generic_frame_info->temporal_id, 0);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersPeriodicSync) {
+ std::vector<int> sync_times;
+ const int kNumFrames = kSyncPeriodSeconds * kFrameRate * 2 - 1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ CodecSpecificInfo info;
+ EncodeFrame(false, &info);
+ timestamp_ += kTimestampDelta5Fps;
+ if (info.codecSpecific.VP8.temporalIdx == 1 &&
+ info.codecSpecific.VP8.layerSync) {
+ sync_times.push_back(timestamp_);
+ }
+ }
+
+ ASSERT_EQ(2u, sync_times.size());
+ EXPECT_GE(sync_times[1] - sync_times[0], 90000 * kSyncPeriodSeconds);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersSyncAfterTimeout) {
+ std::vector<int> sync_times;
+ const int kNumFrames = kMaxSyncPeriodSeconds * kFrameRate * 2 - 1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ CodecSpecificInfo info;
+
+ tl_config_ = NextFrameConfig(0, timestamp_);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ // Simulate TL1 being at least 8 qp steps better.
+ if (tl_config_.packetizer_temporal_idx == 0) {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &info);
+ } else {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp - 8,
+ &info);
+ }
+
+ if (info.codecSpecific.VP8.temporalIdx == 1 &&
+ info.codecSpecific.VP8.layerSync)
+ sync_times.push_back(timestamp_);
+
+ timestamp_ += kTimestampDelta5Fps;
+ }
+
+ ASSERT_EQ(2u, sync_times.size());
+ EXPECT_GE(sync_times[1] - sync_times[0], 90000 * kMaxSyncPeriodSeconds);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersSyncAfterSimilarQP) {
+ std::vector<int> sync_times;
+
+ const int kNumFrames = (kSyncPeriodSeconds +
+ ((kMaxSyncPeriodSeconds - kSyncPeriodSeconds) / 2)) *
+ kFrameRate;
+ for (int i = 0; i < kNumFrames; ++i) {
+ CodecSpecificInfo info;
+
+ ConfigureFrame(false);
+
+ // Simulate TL1 being at least 8 qp steps better.
+ if (tl_config_.packetizer_temporal_idx == 0) {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &info);
+ } else {
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp - 8,
+ &info);
+ }
+
+ if (info.codecSpecific.VP8.temporalIdx == 1 &&
+ info.codecSpecific.VP8.layerSync)
+ sync_times.push_back(timestamp_);
+
+ timestamp_ += kTimestampDelta5Fps;
+ }
+
+ ASSERT_EQ(1u, sync_times.size());
+
+ bool bumped_tl0_quality = false;
+ for (int i = 0; i < 3; ++i) {
+ CodecSpecificInfo info;
+
+ int flags = ConfigureFrame(false);
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp - 8,
+ &info);
+ if (info.codecSpecific.VP8.temporalIdx == 0) {
+ // Bump TL0 to same quality as TL1.
+ bumped_tl0_quality = true;
+ } else {
+ if (bumped_tl0_quality) {
+ EXPECT_TRUE(info.codecSpecific.VP8.layerSync);
+ EXPECT_EQ(kTl1SyncFlags, flags);
+ return;
+ }
+ }
+ timestamp_ += kTimestampDelta5Fps;
+ }
+ ADD_FAILURE() << "No TL1 frame arrived within time limit.";
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersToggling) {
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Insert 50 frames. 2/5 should be TL0.
+ int tl0_frames = 0;
+ int tl1_frames = 0;
+ for (int i = 0; i < 50; ++i) {
+ CodecSpecificInfo info;
+ EncodeFrame(/*base_sync=*/false, &info);
+ EXPECT_EQ(info.codecSpecific.VP8.temporalIdx,
+ info.generic_frame_info->temporal_id);
+ timestamp_ += kTimestampDelta5Fps;
+ switch (info.codecSpecific.VP8.temporalIdx) {
+ case 0:
+ ++tl0_frames;
+ break;
+ case 1:
+ ++tl1_frames;
+ break;
+ default:
+ abort();
+ }
+ }
+ EXPECT_EQ(20, tl0_frames);
+ EXPECT_EQ(30, tl1_frames);
+}
+
+TEST_F(ScreenshareLayerTest, AllFitsLayer0) {
+ frame_size_ = FrameSizeForBitrate(kDefaultTl0BitrateKbps);
+
+ // Insert 50 frames, small enough that all fits in TL0.
+ for (int i = 0; i < 50; ++i) {
+ CodecSpecificInfo info;
+ int flags = EncodeFrame(false, &info);
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_EQ(kTl0Flags, flags);
+ EXPECT_EQ(0, info.codecSpecific.VP8.temporalIdx);
+ }
+}
+
+TEST_F(ScreenshareLayerTest, TooHighBitrate) {
+ frame_size_ = 2 * FrameSizeForBitrate(kDefaultTl1BitrateKbps);
+
+ // Insert 100 frames. Half should be dropped.
+ int tl0_frames = 0;
+ int tl1_frames = 0;
+ int dropped_frames = 0;
+ for (int i = 0; i < 100; ++i) {
+ CodecSpecificInfo info;
+ int flags = EncodeFrame(false, &info);
+ timestamp_ += kTimestampDelta5Fps;
+ if (flags == -1) {
+ ++dropped_frames;
+ } else {
+ switch (info.codecSpecific.VP8.temporalIdx) {
+ case 0:
+ ++tl0_frames;
+ break;
+ case 1:
+ ++tl1_frames;
+ break;
+ default:
+ ADD_FAILURE() << "Unexpected temporal id";
+ }
+ }
+ }
+
+ EXPECT_NEAR(50, tl0_frames + tl1_frames, 1);
+ EXPECT_NEAR(50, dropped_frames, 1);
+}
+
+TEST_F(ScreenshareLayerTest, TargetBitrateCappedByTL0) {
+ const int kTl0_kbps = 100;
+ const int kTl1_kbps = 1000;
+ const std::vector<uint32_t> layer_rates = {kTl0_kbps * 1000,
+ (kTl1_kbps - kTl0_kbps) * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(static_cast<unsigned int>(
+ ScreenshareLayers::kMaxTL0FpsReduction * kTl0_kbps + 0.5),
+ cfg_.rc_target_bitrate);
+}
+
+TEST_F(ScreenshareLayerTest, TargetBitrateCappedByTL1) {
+ const int kTl0_kbps = 100;
+ const int kTl1_kbps = 450;
+ const std::vector<uint32_t> layer_rates = {kTl0_kbps * 1000,
+ (kTl1_kbps - kTl0_kbps) * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(static_cast<unsigned int>(
+ kTl1_kbps / ScreenshareLayers::kAcceptableTargetOvershoot),
+ cfg_.rc_target_bitrate);
+}
+
+TEST_F(ScreenshareLayerTest, TargetBitrateBelowTL0) {
+ const int kTl0_kbps = 100;
+ const std::vector<uint32_t> layer_rates = {kTl0_kbps * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(static_cast<uint32_t>(kTl0_kbps), cfg_.rc_target_bitrate);
+}
+
+TEST_F(ScreenshareLayerTest, EncoderDrop) {
+ EXPECT_TRUE(RunGracePeriod());
+ SkipUntilTl(0);
+
+ // Size 0 indicates dropped frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, 0, IgnoredCodecSpecificInfo());
+
+ // Re-encode frame (so don't advance timestamp).
+ int flags = EncodeFrame(false);
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_FALSE(config_updated_);
+ EXPECT_EQ(kTl0Flags, flags);
+
+ // Next frame should have boosted quality...
+ SkipUntilTl(0);
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+
+ // ...then back to standard setup.
+ SkipUntilTl(0);
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_EQ(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+
+ // Next drop in TL1.
+ SkipUntilTl(1);
+ layers_->OnEncodeDone(0, timestamp_, 0, false, 0, IgnoredCodecSpecificInfo());
+
+  // Re-encode the frame (so don't advance the timestamp).
+ flags = EncodeFrame(false);
+ timestamp_ += kTimestampDelta5Fps;
+ EXPECT_FALSE(config_updated_);
+ EXPECT_EQ(kTl1Flags, flags);
+
+  // Next frame should have boosted quality...
+ SkipUntilTl(1);
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+
+ // ...and back to normal.
+ SkipUntilTl(1);
+ EXPECT_EQ(cfg_.rc_max_quantizer, static_cast<unsigned int>(kDefaultQp));
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ timestamp_ += kTimestampDelta5Fps;
+}
+
+TEST_F(ScreenshareLayerTest, RespectsMaxIntervalBetweenFrames) {
+ const int kLowBitrateKbps = 50;
+ const int kLargeFrameSizeBytes = 100000;
+ const uint32_t kStartTimestamp = 1234;
+
+ const std::vector<uint32_t> layer_rates = {kLowBitrateKbps * 1000};
+ layers_->OnRatesUpdated(0, layer_rates, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(kTl0Flags,
+ LibvpxVp8Encoder::EncodeFlags(NextFrameConfig(0, kStartTimestamp)));
+ layers_->OnEncodeDone(0, kStartTimestamp, kLargeFrameSizeBytes, false,
+ kDefaultQp, IgnoredCodecSpecificInfo());
+
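+  // RTP timestamps tick at 90 kHz, so milliseconds * 90 converts to
+  // timestamp units.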
+ const uint32_t kTwoSecondsLater =
+ kStartTimestamp + (ScreenshareLayers::kMaxFrameIntervalMs * 90);
+
+  // Sanity check: repayment time should exceed kMaxFrameIntervalMs.
+ ASSERT_GT(kStartTimestamp + 90 * (kLargeFrameSizeBytes * 8) / kLowBitrateKbps,
+ kStartTimestamp + (ScreenshareLayers::kMaxFrameIntervalMs * 90));
+
+  // Expect a drop one frame interval before the two-second timeout. If we
+  // try any later, the frame will be dropped anyway by the frame-rate
+  // throttling logic.
+ EXPECT_TRUE(
+ NextFrameConfig(0, kTwoSecondsLater - kTimestampDelta5Fps).drop_frame);
+
+  // More than two seconds have passed since the last frame, so one should be
+  // emitted even if the bitrate target is then exceeded.
+ EXPECT_EQ(kTl0Flags, LibvpxVp8Encoder::EncodeFlags(
+ NextFrameConfig(0, kTwoSecondsLater + 90)));
+}
+
+TEST_F(ScreenshareLayerTest, UpdatesHistograms) {
+ metrics::Reset();
+ bool trigger_drop = false;
+ bool dropped_frame = false;
+ bool overshoot = false;
+ const int kTl0Qp = 35;
+ const int kTl1Qp = 30;
+ for (int64_t timestamp = 0;
+ timestamp < kTimestampDelta5Fps * 5 * metrics::kMinRunTimeInSeconds;
+ timestamp += kTimestampDelta5Fps) {
+ tl_config_ = NextFrameConfig(0, timestamp);
+ if (tl_config_.drop_frame) {
+ dropped_frame = true;
+ continue;
+ }
+ int flags = LibvpxVp8Encoder::EncodeFlags(tl_config_);
+ if (flags != -1)
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ if (timestamp >= kTimestampDelta5Fps * 5 && !overshoot && flags != -1) {
+ // Simulate one overshoot.
+ layers_->OnEncodeDone(0, timestamp, 0, false, 0, nullptr);
+ overshoot = true;
+ }
+
+ if (flags == kTl0Flags) {
+ if (timestamp >= kTimestampDelta5Fps * 20 && !trigger_drop) {
+        // Simulate a too-large frame to cause a frame drop.
+ layers_->OnEncodeDone(0, timestamp, frame_size_ * 10, false, kTl0Qp,
+ IgnoredCodecSpecificInfo());
+ trigger_drop = true;
+ } else {
+ layers_->OnEncodeDone(0, timestamp, frame_size_, false, kTl0Qp,
+ IgnoredCodecSpecificInfo());
+ }
+ } else if (flags == kTl1Flags || flags == kTl1SyncFlags) {
+ layers_->OnEncodeDone(0, timestamp, frame_size_, false, kTl1Qp,
+ IgnoredCodecSpecificInfo());
+ } else if (flags == -1) {
+ dropped_frame = true;
+ } else {
+ RTC_DCHECK_NOTREACHED() << "Unexpected flags";
+ }
+ clock_.AdvanceTime(TimeDelta::Millis(1000 / 5));
+ }
+
+ EXPECT_TRUE(overshoot);
+ EXPECT_TRUE(dropped_frame);
+
+ layers_.reset(); // Histograms are reported on destruction.
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer0.FrameRate"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer1.FrameRate"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.FramesPerDrop"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.FramesPerOvershoot"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.Layer0.Qp"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.Layer1.Qp"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer0.TargetBitrate"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.Layer1.TargetBitrate"));
+
+ EXPECT_METRIC_GT(
+ metrics::MinSample("WebRTC.Video.Screenshare.Layer0.FrameRate"), 1);
+ EXPECT_METRIC_GT(
+ metrics::MinSample("WebRTC.Video.Screenshare.Layer1.FrameRate"), 1);
+ EXPECT_METRIC_GT(metrics::MinSample("WebRTC.Video.Screenshare.FramesPerDrop"),
+ 1);
+ EXPECT_METRIC_GT(
+ metrics::MinSample("WebRTC.Video.Screenshare.FramesPerOvershoot"), 1);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer0.Qp", kTl0Qp));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer1.Qp", kTl1Qp));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer0.TargetBitrate",
+ kDefaultTl0BitrateKbps));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.Layer1.TargetBitrate",
+ kDefaultTl1BitrateKbps));
+}
+
+TEST_F(ScreenshareLayerTest, RespectsConfiguredFramerate) {
+ int64_t kTestSpanMs = 2000;
+ int64_t kFrameIntervalsMs = 1000 / kFrameRate;
+
+ uint32_t timestamp = 1234;
+ int num_input_frames = 0;
+ int num_discarded_frames = 0;
+
+ // Send at regular rate - no drops expected.
+ for (int64_t i = 0; i < kTestSpanMs; i += kFrameIntervalsMs) {
+ if (NextFrameConfig(0, timestamp).drop_frame) {
+ ++num_discarded_frames;
+ } else {
+ size_t frame_size_bytes = kDefaultTl0BitrateKbps * kFrameIntervalsMs / 8;
+ layers_->OnEncodeDone(0, timestamp, frame_size_bytes, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+ timestamp += kFrameIntervalsMs * 90;
+ clock_.AdvanceTime(TimeDelta::Millis(kFrameIntervalsMs));
+
+ ++num_input_frames;
+ }
+ EXPECT_EQ(0, num_discarded_frames);
+
+ // Send at twice the configured rate - drop every other frame.
+ num_input_frames = 0;
+ num_discarded_frames = 0;
+ for (int64_t i = 0; i < kTestSpanMs; i += kFrameIntervalsMs / 2) {
+ if (NextFrameConfig(0, timestamp).drop_frame) {
+ ++num_discarded_frames;
+ } else {
+ size_t frame_size_bytes = kDefaultTl0BitrateKbps * kFrameIntervalsMs / 8;
+ layers_->OnEncodeDone(0, timestamp, frame_size_bytes, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+ }
+ timestamp += kFrameIntervalsMs * 90 / 2;
+ clock_.AdvanceTime(TimeDelta::Millis(kFrameIntervalsMs));
+ ++num_input_frames;
+ }
+
+ // Allow for some rounding errors in the measurements.
+ EXPECT_NEAR(num_discarded_frames, num_input_frames / 2, 2);
+}
+
+TEST_F(ScreenshareLayerTest, 2LayersSyncAtOvershootDrop) {
+  // Run grace period so we have existing frames in both TL0 and TL1.
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Move ahead until we have a sync frame in TL1.
+ EXPECT_EQ(kTl1SyncFlags, SkipUntilTlAndSync(1, true));
+ ASSERT_TRUE(tl_config_.layer_sync);
+
+ // Simulate overshoot of this frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, 0, nullptr);
+
+ cfg_ = layers_->UpdateConfiguration(0);
+ EXPECT_EQ(kTl1SyncFlags, LibvpxVp8Encoder::EncodeFlags(tl_config_));
+
+ CodecSpecificInfo new_info;
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ &new_info);
+ EXPECT_TRUE(new_info.codecSpecific.VP8.layerSync);
+}
+
+TEST_F(ScreenshareLayerTest, DropOnTooShortFrameInterval) {
+  // Run grace period so we have existing frames in both TL0 and TL1.
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Add a large gap, so there's plenty of room in the rate tracker.
+ timestamp_ += kTimestampDelta5Fps * 3;
+ EXPECT_FALSE(NextFrameConfig(0, timestamp_).drop_frame);
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, kDefaultQp,
+ IgnoredCodecSpecificInfo());
+
+  // Frame intervals shorter than 85% of the desired time are not allowed;
+  // try inserting a frame just below this limit.
+ const int64_t kMinFrameInterval = (kTimestampDelta5Fps * 85) / 100;
+ timestamp_ += kMinFrameInterval - 90;
+ EXPECT_TRUE(NextFrameConfig(0, timestamp_).drop_frame);
+
+ // Try again at the limit, now it should pass.
+ timestamp_ += 90;
+ EXPECT_FALSE(NextFrameConfig(0, timestamp_).drop_frame);
+}
+
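+// Frames are still sized for the 5 fps budget, so at 10 fps roughly every
+// other frame overshoots and is dropped; the configuration update should
+// then double the target bitrate to compensate.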
+TEST_F(ScreenshareLayerTest, AdjustsBitrateWhenDroppingFrames) {
+ const uint32_t kTimestampDelta10Fps = kTimestampDelta5Fps / 2;
+ const int kNumFrames = 30;
+ ASSERT_TRUE(cfg_.rc_target_bitrate.has_value());
+ const uint32_t default_bitrate = cfg_.rc_target_bitrate.value();
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, 10);
+
+ int num_dropped_frames = 0;
+ for (int i = 0; i < kNumFrames; ++i) {
+ if (EncodeFrame(false) == -1)
+ ++num_dropped_frames;
+ timestamp_ += kTimestampDelta10Fps;
+ }
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ EXPECT_EQ(num_dropped_frames, kNumFrames / 2);
+ EXPECT_EQ(cfg_.rc_target_bitrate, default_bitrate * 2);
+}
+
+TEST_F(ScreenshareLayerTest, UpdatesConfigurationAfterRateChange) {
+  // Set the initial rate again; no need to update the configuration.
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, kFrameRate);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+ // Rate changed, now update config.
+ std::vector<uint32_t> bitrates = kDefault2TlBitratesBps;
+ bitrates[1] -= 100000;
+ layers_->OnRatesUpdated(0, bitrates, 5);
+ cfg_ = layers_->UpdateConfiguration(0);
+
+  // Change the rate, then set the same changed rate again before updating
+  // the configuration; the update should still apply.
+ bitrates[1] -= 100000;
+ layers_->OnRatesUpdated(0, bitrates, 5);
+ layers_->OnRatesUpdated(0, bitrates, 5);
+ cfg_ = layers_->UpdateConfiguration(0);
+}
+
+TEST_F(ScreenshareLayerTest, MaxQpRestoredAfterDoubleDrop) {
+  // Run grace period so we have existing frames in both TL0 and TL1.
+ EXPECT_TRUE(RunGracePeriod());
+
+ // Move ahead until we have a sync frame in TL1.
+ EXPECT_EQ(kTl1SyncFlags, SkipUntilTlAndSync(1, true));
+ ASSERT_TRUE(tl_config_.layer_sync);
+
+ // Simulate overshoot of this frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, -1, nullptr);
+
+ // Simulate re-encoded frame.
+ layers_->OnEncodeDone(0, timestamp_, 1, false, max_qp_,
+ IgnoredCodecSpecificInfo());
+
+ // Next frame, expect boosted quality.
+  // Slightly alter the bitrate between frames.
+ std::vector<uint32_t> kDefault2TlBitratesBpsAlt = kDefault2TlBitratesBps;
+ kDefault2TlBitratesBpsAlt[1] += 4000;
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBpsAlt, kFrameRate);
+ EXPECT_EQ(kTl1Flags, SkipUntilTlAndSync(1, false));
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, max_qp_);
+ ASSERT_TRUE(cfg_.rc_max_quantizer.has_value());
+ const uint32_t adjusted_qp = cfg_.rc_max_quantizer.value();
+
+ // Simulate overshoot of this frame.
+ layers_->OnEncodeDone(0, timestamp_, 0, false, -1, nullptr);
+
+ // Simulate re-encoded frame.
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, max_qp_,
+ IgnoredCodecSpecificInfo());
+
+ // A third frame, expect boosted quality.
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBps, kFrameRate);
+ EXPECT_EQ(kTl1Flags, SkipUntilTlAndSync(1, false));
+ EXPECT_TRUE(config_updated_);
+ EXPECT_LT(cfg_.rc_max_quantizer, max_qp_);
+ EXPECT_EQ(adjusted_qp, cfg_.rc_max_quantizer);
+
+ // Frame encoded.
+ layers_->OnEncodeDone(0, timestamp_, frame_size_, false, max_qp_,
+ IgnoredCodecSpecificInfo());
+
+ // A fourth frame, max qp should be restored.
+ layers_->OnRatesUpdated(0, kDefault2TlBitratesBpsAlt, kFrameRate);
+ EXPECT_EQ(kTl1Flags, SkipUntilTlAndSync(1, false));
+ EXPECT_EQ(cfg_.rc_max_quantizer, max_qp_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h
new file mode 100644
index 0000000000..9576fb27be
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_TEMPORAL_LAYERS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_TEMPORAL_LAYERS_H_
+
+// TODO(webrtc:9012) Remove this file when downstream projects have updated.
+#include "api/video_codecs/vp8_temporal_layers.h"
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_TEMPORAL_LAYERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc
new file mode 100644
index 0000000000..5aebd2c526
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/include/temporal_layers_checker.h"
+
+#include <memory>
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+std::unique_ptr<TemporalLayersChecker>
+TemporalLayersChecker::CreateTemporalLayersChecker(Vp8TemporalLayersType type,
+ int num_temporal_layers) {
+ switch (type) {
+ case Vp8TemporalLayersType::kFixedPattern:
+ return std::make_unique<DefaultTemporalLayersChecker>(
+ num_temporal_layers);
+ case Vp8TemporalLayersType::kBitrateDynamic:
+ // Conference mode temporal layering for screen content in base stream.
+ return std::make_unique<TemporalLayersChecker>(num_temporal_layers);
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
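+// Usage sketch (hypothetical caller; everything except the checker API
+// itself is a placeholder): create a checker matching the temporal-layers
+// type and validate every frame config the controller produces:
+//
+//   auto checker = TemporalLayersChecker::CreateTemporalLayersChecker(
+//       Vp8TemporalLayersType::kBitrateDynamic, /*num_temporal_layers=*/2);
+//   RTC_DCHECK(checker->CheckTemporalConfig(frame_is_keyframe, tl_config));
+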
+TemporalLayersChecker::TemporalLayersChecker(int num_temporal_layers)
+ : num_temporal_layers_(num_temporal_layers),
+ sequence_number_(0),
+ last_sync_sequence_number_(0),
+ last_tl0_sequence_number_(0) {}
+
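+// Validates a single encoder buffer (last, golden or arf) for the current
+// frame: referencing a non-keyframe from a higher temporal layer is an
+// error, referencing a non-keyframe in a temporal layer above 0 clears the
+// pending sync requirement, the lowest referenced sequence number is
+// tracked, and the buffer state is updated when the frame writes to it.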
+bool TemporalLayersChecker::CheckAndUpdateBufferState(
+ BufferState* state,
+ bool* need_sync,
+ bool frame_is_keyframe,
+ uint8_t temporal_layer,
+ Vp8FrameConfig::BufferFlags flags,
+ uint32_t sequence_number,
+ uint32_t* lowest_sequence_referenced) {
+ if (flags & Vp8FrameConfig::BufferFlags::kReference) {
+ if (state->temporal_layer > 0 && !state->is_keyframe) {
+ *need_sync = false;
+ }
+ if (!state->is_keyframe && !frame_is_keyframe &&
+ state->sequence_number < *lowest_sequence_referenced) {
+ *lowest_sequence_referenced = state->sequence_number;
+ }
+ if (!frame_is_keyframe && !state->is_keyframe &&
+ state->temporal_layer > temporal_layer) {
+ RTC_LOG(LS_ERROR) << "Frame is referencing higher temporal layer.";
+ return false;
+ }
+ }
+ if ((flags & Vp8FrameConfig::BufferFlags::kUpdate)) {
+ state->temporal_layer = temporal_layer;
+ state->sequence_number = sequence_number;
+ state->is_keyframe = frame_is_keyframe;
+ }
+ if (frame_is_keyframe)
+ state->is_keyframe = true;
+ return true;
+}
+
+bool TemporalLayersChecker::CheckTemporalConfig(
+ bool frame_is_keyframe,
+ const Vp8FrameConfig& frame_config) {
+ if (frame_config.drop_frame ||
+ frame_config.packetizer_temporal_idx == kNoTemporalIdx) {
+ return true;
+ }
+ ++sequence_number_;
+ if (frame_config.packetizer_temporal_idx >= num_temporal_layers_ ||
+ (frame_config.packetizer_temporal_idx == kNoTemporalIdx &&
+ num_temporal_layers_ > 1)) {
+ RTC_LOG(LS_ERROR) << "Incorrect temporal layer set for frame: "
+ << frame_config.packetizer_temporal_idx
+ << " num_temporal_layers: " << num_temporal_layers_;
+ return false;
+ }
+
+ uint32_t lowest_sequence_referenced = sequence_number_;
+ bool need_sync = frame_config.packetizer_temporal_idx > 0 &&
+ frame_config.packetizer_temporal_idx != kNoTemporalIdx;
+
+ if (!CheckAndUpdateBufferState(
+ &last_, &need_sync, frame_is_keyframe,
+ frame_config.packetizer_temporal_idx, frame_config.last_buffer_flags,
+ sequence_number_, &lowest_sequence_referenced)) {
+ RTC_LOG(LS_ERROR) << "Error in the Last buffer";
+ return false;
+ }
+ if (!CheckAndUpdateBufferState(&golden_, &need_sync, frame_is_keyframe,
+ frame_config.packetizer_temporal_idx,
+ frame_config.golden_buffer_flags,
+ sequence_number_,
+ &lowest_sequence_referenced)) {
+ RTC_LOG(LS_ERROR) << "Error in the Golden buffer";
+ return false;
+ }
+ if (!CheckAndUpdateBufferState(
+ &arf_, &need_sync, frame_is_keyframe,
+ frame_config.packetizer_temporal_idx, frame_config.arf_buffer_flags,
+ sequence_number_, &lowest_sequence_referenced)) {
+ RTC_LOG(LS_ERROR) << "Error in the Arf buffer";
+ return false;
+ }
+
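+  // A delta frame must never reference anything older than the most recent
+  // sync point; receivers that joined at that sync frame could not decode
+  // such a reference.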
+ if (lowest_sequence_referenced < last_sync_sequence_number_ &&
+ !frame_is_keyframe) {
+ RTC_LOG(LS_ERROR) << "Reference past the last sync frame. Referenced "
+ << lowest_sequence_referenced << ", but sync was at "
+ << last_sync_sequence_number_;
+ return false;
+ }
+
+ if (frame_config.packetizer_temporal_idx == 0) {
+ last_tl0_sequence_number_ = sequence_number_;
+ }
+
+ if (frame_is_keyframe) {
+ last_sync_sequence_number_ = sequence_number_;
+ }
+
+ if (need_sync) {
+ last_sync_sequence_number_ = last_tl0_sequence_number_;
+ }
+
+  // Ignore the sync flag on keyframes, as it doesn't matter there.
+ if (need_sync != frame_config.layer_sync && !frame_is_keyframe) {
+ RTC_LOG(LS_ERROR) << "Sync bit is set incorrectly on a frame. Expected: "
+ << need_sync << " Actual: " << frame_config.layer_sync;
+ return false;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
new file mode 100644
index 0000000000..c5a8b659c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -0,0 +1,913 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "common_video/test/utilities.h"
+#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "rtc_base/time_utils.h"
+#include "test/field_trial.h"
+#include "test/mappable_native_buffer.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Return;
+using EncoderInfo = webrtc::VideoEncoder::EncoderInfo;
+using FramerateFractions =
+ absl::InlinedVector<uint8_t, webrtc::kMaxTemporalStreams>;
+
+namespace {
+constexpr uint32_t kLegacyScreenshareTl0BitrateKbps = 200;
+constexpr uint32_t kLegacyScreenshareTl1BitrateKbps = 1000;
+constexpr uint32_t kInitialTimestampRtp = 123;
+constexpr int64_t kTestNtpTimeMs = 456;
+constexpr int64_t kInitialTimestampMs = 789;
+constexpr int kNumCores = 1;
+constexpr size_t kMaxPayloadSize = 1440;
+constexpr int kWidth = 172;
+constexpr int kHeight = 144;
+constexpr float kFramerateFps = 30;
+
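+// The Capabilities constructor argument is the loss_notification capability,
+// which these tests leave disabled.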
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities,
+ kNumCores,
+ kMaxPayloadSize);
+} // namespace
+
+class TestVp8Impl : public VideoCodecUnitTest {
+ protected:
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return VP8Encoder::Create();
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return VP8Decoder::Create();
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecVP8, codec_settings);
+ codec_settings->width = kWidth;
+ codec_settings->height = kHeight;
+ codec_settings->SetVideoEncoderComplexity(
+ VideoCodecComplexity::kComplexityNormal);
+ }
+
+ void EncodeAndWaitForFrame(const VideoFrame& input_frame,
+ EncodedImage* encoded_frame,
+ CodecSpecificInfo* codec_specific_info,
+ bool keyframe = false) {
+ std::vector<VideoFrameType> frame_types;
+ if (keyframe) {
+ frame_types.emplace_back(VideoFrameType::kVideoFrameKey);
+ } else {
+ frame_types.emplace_back(VideoFrameType::kVideoFrameDelta);
+ }
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(input_frame, &frame_types));
+ ASSERT_TRUE(WaitForEncodedFrame(encoded_frame, codec_specific_info));
+ VerifyQpParser(*encoded_frame);
+ EXPECT_EQ(kVideoCodecVP8, codec_specific_info->codecType);
+ EXPECT_EQ(0, encoded_frame->SpatialIndex());
+ }
+
+ void EncodeAndExpectFrameWith(const VideoFrame& input_frame,
+ uint8_t temporal_idx,
+ bool keyframe = false) {
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info,
+ keyframe);
+ EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP8.temporalIdx);
+ }
+
+ void VerifyQpParser(const EncodedImage& encoded_frame) const {
+ int qp;
+ EXPECT_GT(encoded_frame.size(), 0u);
+ ASSERT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
+ }
+};
+
+TEST_F(TestVp8Impl, ErrorResilienceDisabledForNoTemporalLayers) {
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 1;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_CALL(*vpx,
+ codec_enc_init(
+ _, _, Field(&vpx_codec_enc_cfg_t::g_error_resilient, 0), _));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp8Impl, DefaultErrorResilienceEnabledForTemporalLayers) {
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_CALL(*vpx,
+ codec_enc_init(_, _,
+ Field(&vpx_codec_enc_cfg_t::g_error_resilient,
+ VPX_ERROR_RESILIENT_DEFAULT),
+ _));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp8Impl,
+ PartitionErrorResilienceEnabledForTemporalLayersWithFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP8-ForcePartitionResilience/Enabled/");
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_CALL(*vpx,
+ codec_enc_init(_, _,
+ Field(&vpx_codec_enc_cfg_t::g_error_resilient,
+ VPX_ERROR_RESILIENT_PARTITIONS),
+ _));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp8Impl, SetRates) {
+ codec_settings_.SetFrameDropEnabled(true);
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_,
+ VideoEncoder::Settings(kCapabilities, 1, 1000)));
+
+ const uint32_t kBitrateBps = 300000;
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, kBitrateBps);
+ EXPECT_CALL(
+ *vpx,
+ codec_enc_config_set(
+ _, AllOf(Field(&vpx_codec_enc_cfg_t::rc_target_bitrate,
+ kBitrateBps / 1000),
+ Field(&vpx_codec_enc_cfg_t::rc_undershoot_pct, 100u),
+ Field(&vpx_codec_enc_cfg_t::rc_overshoot_pct, 15u),
+ Field(&vpx_codec_enc_cfg_t::rc_buf_sz, 1000u),
+ Field(&vpx_codec_enc_cfg_t::rc_buf_optimal_sz, 600u),
+ Field(&vpx_codec_enc_cfg_t::rc_dropframe_thresh, 30u))))
+ .WillOnce(Return(VPX_CODEC_OK));
+ encoder.SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, static_cast<double>(codec_settings_.maxFramerate)));
+}
+
+TEST_F(TestVp8Impl, EncodeFrameAndRelease) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
+ encoder_->Encode(NextInputFrame(), nullptr));
+}
+
+TEST_F(TestVp8Impl, EncodeNv12FrameSimulcast) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
+ absl::nullopt);
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
+ encoder_->Encode(NextInputFrame(), nullptr));
+}
+
+TEST_F(TestVp8Impl, EncodeI420FrameAfterNv12Frame) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
+ absl::nullopt);
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420,
+ absl::nullopt);
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
+ encoder_->Encode(NextInputFrame(), nullptr));
+}
+
+TEST_F(TestVp8Impl, Configure) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
+ EXPECT_TRUE(decoder_->Configure({}));
+}
+
+TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
+ VideoFrame input_frame = NextInputFrame();
+ input_frame.set_timestamp(kInitialTimestampRtp);
+ input_frame.set_timestamp_us(kInitialTimestampMs *
+ rtc::kNumMicrosecsPerMillisec);
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
+ EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
+ EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
+}
+
+TEST_F(TestVp8Impl,
+ EncoderFillsResolutionInCodecAgnosticSectionOfCodecSpecificInfo) {
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ ASSERT_TRUE(codec_specific_info.template_structure);
+ EXPECT_THAT(codec_specific_info.template_structure->resolutions,
+ ElementsAre(RenderResolution(kWidth, kHeight)));
+}
+
+TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
+ VideoFrame input_frame = NextInputFrame();
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ ASSERT_TRUE(decoded_qp);
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+ EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
+}
+
+TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
+ codec_settings_.numberOfSimulcastStreams = 2;
+ // Resolutions are not in ascending order, temporal layers do not match.
+ codec_settings_.simulcastStream[0] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 2,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = 30,
+ .numberOfTemporalLayers = 3,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ codec_settings_.numberOfSimulcastStreams = 3;
+ // Resolutions are not in ascending order.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2 - 1,
+ .height = kHeight / 2 - 1,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = 30,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Resolutions are not in ascending order.
+ codec_settings_.simulcastStream[0] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth - 1,
+ .height = kHeight - 1,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Temporal layers do not match.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 2,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 3,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Resolutions do not match codec config.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4 + 1,
+ .height = kHeight / 4 + 1,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2 + 2,
+ .height = kHeight / 2 + 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth + 4,
+ .height = kHeight + 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Everything fine: scaling by 2, top resolution matches video, temporal
+ // settings are the same for all layers.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+ // Everything fine: custom scaling, top resolution matches video, temporal
+ // settings are the same for all layers.
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[1] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AlignedStrideEncodeDecode DISABLED_AlignedStrideEncodeDecode
+#else
+#define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
+#endif
+TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
+ VideoFrame input_frame = NextInputFrame();
+ input_frame.set_timestamp(kInitialTimestampRtp);
+ input_frame.set_timestamp_us(kInitialTimestampMs *
+ rtc::kNumMicrosecsPerMillisec);
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ // Compute PSNR on all planes (faster than SSIM).
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+ EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
+}
+
+TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) {
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Temporal layer 0.
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
+
+ EXPECT_EQ(0, codec_specific_info.codecSpecific.VP8.temporalIdx);
+ // Temporal layer 1.
+ EncodeAndExpectFrameWith(NextInputFrame(), 1);
+ // Temporal layer 0.
+ EncodeAndExpectFrameWith(NextInputFrame(), 0);
+ // Temporal layer 1.
+ EncodeAndExpectFrameWith(NextInputFrame(), 1);
+}
+
+TEST_F(TestVp8Impl, ScalingDisabledIfAutomaticResizeOff) {
+ codec_settings_.VP8()->automaticResizeOn = false;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoEncoder::ScalingSettings settings =
+ encoder_->GetEncoderInfo().scaling_settings;
+ EXPECT_FALSE(settings.thresholds.has_value());
+}
+
+TEST_F(TestVp8Impl, ScalingEnabledIfAutomaticResizeOn) {
+ codec_settings_.SetFrameDropEnabled(true);
+ codec_settings_.VP8()->automaticResizeOn = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoEncoder::ScalingSettings settings =
+ encoder_->GetEncoderInfo().scaling_settings;
+ EXPECT_TRUE(settings.thresholds.has_value());
+ EXPECT_EQ(kDefaultMinPixelsPerFrame, settings.min_pixels_per_frame);
+}
+
+TEST_F(TestVp8Impl, DontDropKeyframes) {
+ // Set very high resolution to trigger overuse more easily.
+ const int kScreenWidth = 1920;
+ const int kScreenHeight = 1080;
+
+ codec_settings_.width = kScreenWidth;
+ codec_settings_.height = kScreenHeight;
+
+  // Screensharing turns the internal frame dropper off and instead asks
+  // ScreenshareLayers, per frame, whether it should be dropped.
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ // ScreenshareLayers triggers on 2 temporal layers and 1000kbps max bitrate.
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+ codec_settings_.maxBitrate = 1000;
+
+  // Reset the frame generator with a large number of squares, giving lots of
+  // detail and a high probability of overshoot.
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ codec_settings_.width, codec_settings_.height,
+ test::FrameGeneratorInterface::OutputType::kI420,
+ /* num_squares = */ absl::optional<int>(300));
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ // Bitrate only enough for TL0.
+ bitrate_allocation.SetBitrate(0, 0, 200000);
+ encoder_->SetRates(
+ VideoEncoder::RateControlParameters(bitrate_allocation, 5.0));
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info,
+ true);
+ EncodeAndExpectFrameWith(NextInputFrame(), 0, true);
+ EncodeAndExpectFrameWith(NextInputFrame(), 0, true);
+ EncodeAndExpectFrameWith(NextInputFrame(), 0, true);
+}
+
+TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ // Settings needed to trigger ScreenshareLayers usage, which is required for
+ // overshoot-drop-reencode logic.
+ codec_settings_.maxBitrate = 1000;
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP8()->numberOfTemporalLayers = 2;
+ codec_settings_.legacy_conference_mode = true;
+
+ EXPECT_CALL(*vpx, img_wrap(_, _, _, _, _, _))
+ .WillOnce(Invoke([](vpx_image_t* img, vpx_img_fmt_t fmt, unsigned int d_w,
+ unsigned int d_h, unsigned int stride_align,
+ unsigned char* img_data) {
+ img->fmt = fmt;
+ img->d_w = d_w;
+ img->d_h = d_h;
+ img->img_data = img_data;
+ return img;
+ }));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder.InitEncode(&codec_settings_,
+ VideoEncoder::Settings(kCapabilities, 1, 1000)));
+ MockEncodedImageCallback callback;
+ encoder.RegisterEncodeCompleteCallback(&callback);
+
+  // Simulate an overshoot drop and re-encode: the encode function will be
+  // called twice with the same parameters. codec_get_cx_data() will by
+  // default return no image data, which is interpreted as a drop.
+ EXPECT_CALL(*vpx, codec_encode(_, _, /* pts = */ 0, _, _, _))
+ .Times(2)
+ .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
+
+ auto delta_frame =
+ std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
+ encoder.Encode(NextInputFrame(), &delta_frame);
+}
+
+TEST(LibvpxVp8EncoderTest, GetEncoderInfoReturnsStaticInformation) {
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ const auto info = encoder.GetEncoderInfo();
+
+ EXPECT_FALSE(info.supports_native_handle);
+ EXPECT_FALSE(info.is_hardware_accelerated);
+ EXPECT_TRUE(info.supports_simulcast);
+ EXPECT_EQ(info.implementation_name, "libvpx");
+ EXPECT_EQ(info.requested_resolution_alignment, 1u);
+ EXPECT_THAT(info.preferred_pixel_formats,
+ testing::UnorderedElementsAre(VideoFrameBuffer::Type::kNV12,
+ VideoFrameBuffer::Type::kI420));
+}
+
+TEST(LibvpxVp8EncoderTest, RequestedResolutionAlignmentFromFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP8-GetEncoderInfoOverride/"
+ "requested_resolution_alignment:10/");
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ EXPECT_EQ(encoder.GetEncoderInfo().requested_resolution_alignment, 10u);
+ EXPECT_FALSE(
+ encoder.GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+ EXPECT_TRUE(encoder.GetEncoderInfo().resolution_bitrate_limits.empty());
+}
+
+TEST(LibvpxVp8EncoderTest, ResolutionBitrateLimitsFromFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP8-GetEncoderInfoOverride/"
+ "frame_size_pixels:123|456|789,"
+ "min_start_bitrate_bps:11000|22000|33000,"
+ "min_bitrate_bps:44000|55000|66000,"
+ "max_bitrate_bps:77000|88000|99000/");
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ EXPECT_THAT(
+ encoder.GetEncoderInfo().resolution_bitrate_limits,
+ ::testing::ElementsAre(
+ VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000},
+ VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000},
+ VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000}));
+}
+
+TEST(LibvpxVp8EncoderTest,
+ GetEncoderInfoReturnsEmptyResolutionBitrateLimitsByDefault) {
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ VP8Encoder::Settings());
+
+ const auto info = encoder.GetEncoderInfo();
+
+ EXPECT_TRUE(info.resolution_bitrate_limits.empty());
+}
+
+TEST(LibvpxVp8EncoderTest,
+ GetEncoderInfoReturnsResolutionBitrateLimitsAsConfigured) {
+ std::vector<VideoEncoder::ResolutionBitrateLimits> resolution_bitrate_limits =
+ {VideoEncoder::ResolutionBitrateLimits(/*frame_size_pixels=*/640 * 360,
+ /*min_start_bitrate_bps=*/300,
+ /*min_bitrate_bps=*/100,
+ /*max_bitrate_bps=*/1000),
+ VideoEncoder::ResolutionBitrateLimits(320 * 180, 100, 30, 500)};
+ VP8Encoder::Settings settings;
+ settings.resolution_bitrate_limits = resolution_bitrate_limits;
+
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp8Encoder encoder((std::unique_ptr<LibvpxInterface>(vpx)),
+ std::move(settings));
+
+ const auto info = encoder.GetEncoderInfo();
+
+ EXPECT_EQ(info.resolution_bitrate_limits, resolution_bitrate_limits);
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationNoLayers) {
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers] = {
+ FramerateFractions(1, EncoderInfo::kMaxFramerateFraction)};
+
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationTwoTemporalLayers) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ codec_settings_.numberOfSimulcastStreams = 1;
+ codec_settings_.simulcastStream[0].active = true;
+ codec_settings_.simulcastStream[0].targetBitrate = 100;
+ codec_settings_.simulcastStream[0].maxBitrate = 100;
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationThreeTemporalLayers) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ codec_settings_.numberOfSimulcastStreams = 1;
+ codec_settings_.simulcastStream[0].active = true;
+ codec_settings_.simulcastStream[0].targetBitrate = 100;
+ codec_settings_.simulcastStream[0].maxBitrate = 100;
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 3;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationScreenshareLayers) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+ codec_settings_.numberOfSimulcastStreams = 1;
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.simulcastStream[0].active = true;
+ codec_settings_.simulcastStream[0].minBitrate = 30;
+ codec_settings_.simulcastStream[0].targetBitrate =
+ kLegacyScreenshareTl0BitrateKbps;
+ codec_settings_.simulcastStream[0].maxBitrate =
+ kLegacyScreenshareTl1BitrateKbps;
+ codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_settings_.legacy_conference_mode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+  // Expect an empty vector, since this mode doesn't have a fixed framerate.
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationSimulcastVideo) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
+ // Set up three simulcast streams with three temporal layers each.
+ codec_settings_.numberOfSimulcastStreams = 3;
+ for (int i = 0; i < codec_settings_.numberOfSimulcastStreams; ++i) {
+ codec_settings_.simulcastStream[i].active = true;
+ codec_settings_.simulcastStream[i].minBitrate = 30;
+ codec_settings_.simulcastStream[i].targetBitrate = 30;
+ codec_settings_.simulcastStream[i].maxBitrate = 30;
+ codec_settings_.simulcastStream[i].numberOfTemporalLayers = 3;
+ codec_settings_.simulcastStream[i].width =
+ codec_settings_.width >>
+ (codec_settings_.numberOfSimulcastStreams - i - 1);
+ codec_settings_.simulcastStream[i].height =
+ codec_settings_.height >>
+ (codec_settings_.numberOfSimulcastStreams - i - 1);
+ }
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+ expected_fps_allocation[1] = expected_fps_allocation[0];
+ expected_fps_allocation[2] = expected_fps_allocation[0];
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+
+ // Release encoder and re-init without temporal layers.
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
+  // Sanity-check the fps allocation when not initialized.
+ FramerateFractions default_fps_fraction[kMaxSpatialLayers];
+ default_fps_fraction[0].push_back(EncoderInfo::kMaxFramerateFraction);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(default_fps_fraction));
+
+ for (int i = 0; i < codec_settings_.numberOfSimulcastStreams; ++i) {
+ codec_settings_.simulcastStream[i].numberOfTemporalLayers = 1;
+ }
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ for (size_t i = 0; i < 3; ++i) {
+ expected_fps_allocation[i].clear();
+ expected_fps_allocation[i].push_back(EncoderInfo::kMaxFramerateFraction);
+ }
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+class TestVp8ImplForPixelFormat
+ : public TestVp8Impl,
+ public ::testing::WithParamInterface<VideoFrameBuffer::Type> {
+ public:
+ TestVp8ImplForPixelFormat() : TestVp8Impl(), mappable_type_(GetParam()) {}
+
+ protected:
+ VideoFrameBuffer::Type mappable_type_;
+};
+
+TEST_P(TestVp8ImplForPixelFormat, EncodeNativeFrameSimulcast) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
+ // Configure simulcast.
+ codec_settings_.numberOfSimulcastStreams = 3;
+ codec_settings_.simulcastStream[0] = {.width = kWidth / 4,
+ .height = kHeight / 4,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80,
+ .active = true};
+ codec_settings_.simulcastStream[1] = {.width = kWidth / 2,
+ .height = kHeight / 2,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80,
+ .active = true};
+ codec_settings_.simulcastStream[2] = {.width = kWidth,
+ .height = kHeight,
+ .maxFramerate = kFramerateFps,
+ .numberOfTemporalLayers = 1,
+ .maxBitrate = 4000,
+ .targetBitrate = 3000,
+ .minBitrate = 2000,
+ .qpMax = 80,
+ .active = true};
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+  // Create a zero-conversion native frame (calling ToI420 on it crashes).
+ VideoFrame input_frame =
+ test::CreateMappableNativeFrame(1, mappable_type_, kWidth, kHeight);
+
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
+
+ // After encoding, we expect one mapping per simulcast layer.
+ rtc::scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
+ test::GetMappableNativeBufferFromVideoFrame(input_frame);
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_buffers =
+ mappable_buffer->GetMappedFramedBuffers();
+ ASSERT_EQ(mapped_buffers.size(), 3u);
+ EXPECT_EQ(mapped_buffers[0]->type(), mappable_type_);
+ EXPECT_EQ(mapped_buffers[0]->width(), kWidth);
+ EXPECT_EQ(mapped_buffers[0]->height(), kHeight);
+ EXPECT_EQ(mapped_buffers[1]->type(), mappable_type_);
+ EXPECT_EQ(mapped_buffers[1]->width(), kWidth / 2);
+ EXPECT_EQ(mapped_buffers[1]->height(), kHeight / 2);
+ EXPECT_EQ(mapped_buffers[2]->type(), mappable_type_);
+ EXPECT_EQ(mapped_buffers[2]->width(), kWidth / 4);
+ EXPECT_EQ(mapped_buffers[2]->height(), kHeight / 4);
+ EXPECT_FALSE(mappable_buffer->DidConvertToI420());
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+}
+
+INSTANTIATE_TEST_SUITE_P(All,
+ TestVp8ImplForPixelFormat,
+ ::testing::Values(VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc
new file mode 100644
index 0000000000..9c7495ddf7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
+
+namespace webrtc {
+
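+// A linear scan is sufficient here: kVP8SupportedScalabilityModes (declared
+// in the header) holds only the three temporal-layer modes L1T1..L1T3.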
+bool VP8SupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ for (const auto& entry : kVP8SupportedScalabilityModes) {
+ if (entry == scalability_mode) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h
new file mode 100644
index 0000000000..923f159118
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_VP8_SCALABILITY_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_VP8_SCALABILITY_H_
+
+#include "api/video_codecs/scalability_mode.h"
+
+namespace webrtc {
+
+inline constexpr ScalabilityMode kVP8SupportedScalabilityModes[] = {
+ ScalabilityMode::kL1T1, ScalabilityMode::kL1T2, ScalabilityMode::kL1T3};
+bool VP8SupportsScalabilityMode(ScalabilityMode scalability_mode);
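+
+// Example (hypothetical caller): reject unsupported modes before configuring
+// the encoder:
+//   if (!VP8SupportsScalabilityMode(requested_mode)) {
+//     // Fall back to single-layer (L1T1) encoding.
+//   }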
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_VP8_SCALABILITY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS b/third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS
new file mode 100644
index 0000000000..cc5cd70142
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+media/base",
+]
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h
new file mode 100644
index 0000000000..79d403ded3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/codec.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// Returns a vector with all supported internal VP9 profiles that we can
+// negotiate in SDP, in order of preference.
+std::vector<SdpVideoFormat> SupportedVP9Codecs(
+ bool add_scalability_modes = false);
+
+// Returns a vector with all supported internal VP9 decode profiles in order of
+// preference. These will be available for receive-only connections.
+std::vector<SdpVideoFormat> SupportedVP9DecoderCodecs();
+
+class VP9Encoder : public VideoEncoder {
+ public:
+ // Deprecated. Returns default implementation using VP9 Profile 0.
+ // TODO(emircan): Remove once this is no longer used.
+ static std::unique_ptr<VP9Encoder> Create();
+ // Parses VP9 Profile from `codec` and returns the appropriate implementation.
+ static std::unique_ptr<VP9Encoder> Create(const cricket::VideoCodec& codec);
+ static bool SupportsScalabilityMode(ScalabilityMode scalability_mode);
+
+ ~VP9Encoder() override {}
+};
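+
+// Illustrative sketch, not part of the original change: creating an encoder
+// for a negotiated format, assuming `format` is an SdpVideoFormat picked from
+// SupportedVP9Codecs() and that cricket::VideoCodec can be constructed from
+// it.
+//
+//   cricket::VideoCodec codec(format);
+//   std::unique_ptr<VP9Encoder> encoder = VP9Encoder::Create(codec);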
+
+class VP9Decoder : public VideoDecoder {
+ public:
+ static std::unique_ptr<VP9Decoder> Create();
+
+ ~VP9Decoder() override {}
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h
new file mode 100644
index 0000000000..f67215ec77
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/include/vp9_globals.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
+
+#include <stdint.h>
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+const int16_t kMaxOneBytePictureId = 0x7F; // 7 bits
+const int16_t kMaxTwoBytePictureId = 0x7FFF; // 15 bits
+const uint8_t kNoSpatialIdx = 0xFF;
+const uint8_t kNoGofIdx = 0xFF;
+const uint8_t kNumVp9Buffers = 8;
+const size_t kMaxVp9RefPics = 3;
+const size_t kMaxVp9FramesInGof = 0xFF; // 8 bits
+const size_t kMaxVp9NumberOfSpatialLayers = 8;
+
+const size_t kMinVp9SpatialLayerLongSideLength = 240;
+const size_t kMinVp9SpatialLayerShortSideLength = 135;
+
+enum TemporalStructureMode {
+ kTemporalStructureMode1, // 1 temporal layer structure - i.e., IPPP...
+ kTemporalStructureMode2, // 2 temporal layers 01...
+ kTemporalStructureMode3, // 3 temporal layers 0212...
+};
+
+struct GofInfoVP9 {
+ void SetGofInfoVP9(TemporalStructureMode tm) {
+ switch (tm) {
+ case kTemporalStructureMode1:
+ num_frames_in_gof = 1;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 1;
+ break;
+ case kTemporalStructureMode2:
+ num_frames_in_gof = 2;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 2;
+
+ temporal_idx[1] = 1;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+ break;
+ case kTemporalStructureMode3:
+ num_frames_in_gof = 4;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = true;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 4;
+
+ temporal_idx[1] = 2;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+
+ temporal_idx[2] = 1;
+ temporal_up_switch[2] = true;
+ num_ref_pics[2] = 1;
+ pid_diff[2][0] = 2;
+
+ temporal_idx[3] = 2;
+ temporal_up_switch[3] = true;
+ num_ref_pics[3] = 1;
+ pid_diff[3][0] = 1;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+
+ void CopyGofInfoVP9(const GofInfoVP9& src) {
+ num_frames_in_gof = src.num_frames_in_gof;
+ for (size_t i = 0; i < num_frames_in_gof; ++i) {
+ temporal_idx[i] = src.temporal_idx[i];
+ temporal_up_switch[i] = src.temporal_up_switch[i];
+ num_ref_pics[i] = src.num_ref_pics[i];
+ for (uint8_t r = 0; r < num_ref_pics[i]; ++r) {
+ pid_diff[i][r] = src.pid_diff[i][r];
+ }
+ }
+ }
+
+ size_t num_frames_in_gof;
+ uint8_t temporal_idx[kMaxVp9FramesInGof];
+ bool temporal_up_switch[kMaxVp9FramesInGof];
+ uint8_t num_ref_pics[kMaxVp9FramesInGof];
+ uint8_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
+ uint16_t pid_start;
+};
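+
+// Illustrative example, not part of the original change: reading back the
+// predefined three-temporal-layer structure.
+//
+//   GofInfoVP9 gof;
+//   gof.SetGofInfoVP9(kTemporalStructureMode3);
+//   // gof.num_frames_in_gof == 4 and temporal_idx == {0, 2, 1, 2};
+//   // pid_diff[0][0] == 4, i.e. the TL0 frame references the TL0 frame one
+//   // GOF earlier.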
+
+struct RTPVideoHeaderVP9 {
+ void InitRTPVideoHeaderVP9() {
+ inter_pic_predicted = false;
+ flexible_mode = false;
+ beginning_of_frame = false;
+ end_of_frame = false;
+ ss_data_available = false;
+ non_ref_for_inter_layer_pred = false;
+ picture_id = kNoPictureId;
+ max_picture_id = kMaxTwoBytePictureId;
+ tl0_pic_idx = kNoTl0PicIdx;
+ temporal_idx = kNoTemporalIdx;
+ spatial_idx = kNoSpatialIdx;
+ temporal_up_switch = false;
+ inter_layer_predicted = false;
+ gof_idx = kNoGofIdx;
+ num_ref_pics = 0;
+ num_spatial_layers = 1;
+ first_active_layer = 0;
+ end_of_picture = true;
+ }
+
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode; // This frame is in flexible mode.
+ bool beginning_of_frame; // True if this packet is the first in a VP9 layer
+ // frame.
+ bool end_of_frame; // True if this packet is the last in a VP9 layer frame.
+ bool ss_data_available; // True if SS data is available in this payload
+ // descriptor.
+ bool non_ref_for_inter_layer_pred; // True for frame which is not used as
+ // reference for inter-layer prediction.
+ int16_t picture_id; // PictureID index, 15 bits;
+ // kNoPictureId if PictureID does not exist.
+ int16_t max_picture_id; // Maximum picture ID index; either 0x7F or 0x7FFF;
+ int16_t tl0_pic_idx; // TL0PIC_IDX, 8 bits;
+ // kNoTl0PicIdx means no value provided.
+ uint8_t temporal_idx; // Temporal layer index, or kNoTemporalIdx.
+ uint8_t spatial_idx; // Spatial layer index, or kNoSpatialIdx.
+ bool temporal_up_switch; // True if upswitch to higher frame rate is possible
+ // meaning subsequent higher temporal layer pictures
+ // will not depend on any picture before the current
+ // picture (in coding order) with temporal layer ID
+ // greater than `temporal_idx` of this frame.
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+
+ uint8_t gof_idx; // Index to predefined temporal frame info in SS data.
+
+ uint8_t num_ref_pics; // Number of reference pictures used by this layer
+ // frame.
+ uint8_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID
+ // of the reference pictures.
+ int16_t ref_picture_id[kMaxVp9RefPics]; // PictureID of reference pictures.
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ size_t first_active_layer; // Not sent on wire, used to adjust ss data.
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+
+ bool end_of_picture; // This frame is the last frame in picture.
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
new file mode 100644
index 0000000000..a981f259cf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifdef RTC_ENABLE_VP9
+
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h"
+
+#include <algorithm>
+
+#include "absl/strings/match.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/color_space.h"
+#include "api/video/i010_buffer.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+
+namespace webrtc {
+namespace {
+
+// Helper function for extracting the VP9 color space.
+ColorSpace ExtractVP9ColorSpace(vpx_color_space_t space_t,
+ vpx_color_range_t range_t,
+ unsigned int bit_depth) {
+ ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified;
+ ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified;
+ ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified;
+ switch (space_t) {
+ case VPX_CS_BT_601:
+ case VPX_CS_SMPTE_170:
+ primaries = ColorSpace::PrimaryID::kSMPTE170M;
+ transfer = ColorSpace::TransferID::kSMPTE170M;
+ matrix = ColorSpace::MatrixID::kSMPTE170M;
+ break;
+ case VPX_CS_SMPTE_240:
+ primaries = ColorSpace::PrimaryID::kSMPTE240M;
+ transfer = ColorSpace::TransferID::kSMPTE240M;
+ matrix = ColorSpace::MatrixID::kSMPTE240M;
+ break;
+ case VPX_CS_BT_709:
+ primaries = ColorSpace::PrimaryID::kBT709;
+ transfer = ColorSpace::TransferID::kBT709;
+ matrix = ColorSpace::MatrixID::kBT709;
+ break;
+ case VPX_CS_BT_2020:
+ primaries = ColorSpace::PrimaryID::kBT2020;
+ switch (bit_depth) {
+ case 8:
+ transfer = ColorSpace::TransferID::kBT709;
+ break;
+ case 10:
+ transfer = ColorSpace::TransferID::kBT2020_10;
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ matrix = ColorSpace::MatrixID::kBT2020_NCL;
+ break;
+ case VPX_CS_SRGB:
+ primaries = ColorSpace::PrimaryID::kBT709;
+ transfer = ColorSpace::TransferID::kIEC61966_2_1;
+ matrix = ColorSpace::MatrixID::kBT709;
+ break;
+ default:
+ break;
+ }
+
+ ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid;
+ switch (range_t) {
+ case VPX_CR_STUDIO_RANGE:
+ range = ColorSpace::RangeID::kLimited;
+ break;
+ case VPX_CR_FULL_RANGE:
+ range = ColorSpace::RangeID::kFull;
+ break;
+ default:
+ break;
+ }
+ return ColorSpace(primaries, transfer, matrix, range);
+}
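+
+// Illustrative sketch, not part of the original change: the fields consumed
+// here come straight from a decoded vpx_image_t, as used in ReturnFrame()
+// below.
+//
+//   ColorSpace cs = ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth);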
+
+} // namespace
+
+LibvpxVp9Decoder::LibvpxVp9Decoder()
+ : decode_complete_callback_(nullptr),
+ inited_(false),
+ decoder_(nullptr),
+ key_frame_required_(true) {}
+
+LibvpxVp9Decoder::~LibvpxVp9Decoder() {
+ inited_ = true; // in order to do the actual release
+ Release();
+ int num_buffers_in_use = libvpx_buffer_pool_.GetNumBuffersInUse();
+ if (num_buffers_in_use > 0) {
+ // The frame buffers are reference counted and frames are exposed after
+ // decoding. There may be valid use cases where previous frames are still
+ // referenced after ~LibvpxVp9Decoder; that is not a leak.
+ RTC_LOG(LS_INFO) << num_buffers_in_use
+ << " Vp9FrameBuffers are still "
+ "referenced during ~LibvpxVp9Decoder.";
+ }
+}
+
+bool LibvpxVp9Decoder::Configure(const Settings& settings) {
+ if (Release() < 0) {
+ return false;
+ }
+
+ if (decoder_ == nullptr) {
+ decoder_ = new vpx_codec_ctx_t;
+ memset(decoder_, 0, sizeof(*decoder_));
+ }
+ vpx_codec_dec_cfg_t cfg;
+ memset(&cfg, 0, sizeof(cfg));
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ // We focus on webrtc fuzzing here, not libvpx itself. Use a single thread
+ // for fuzzing, because:
+ // - libvpx's single-threaded VP9 decoder is more fuzzer friendly: it
+ //   detects errors earlier than the multi-threaded version.
+ // - It keeps peak CPU usage under control (not dependent on the input).
+ cfg.threads = 1;
+#else
+ const RenderResolution& resolution = settings.max_render_resolution();
+ if (!resolution.Valid()) {
+ // Postpone configuring number of threads until resolution is known.
+ cfg.threads = 1;
+ } else {
+ // We want to use multithreading when decoding high resolution videos, but
+ // not too many threads, in order to avoid overhead when many streams are
+ // decoded concurrently.
+ // Set 2 threads as the target for a 1280x720 pixel count, and scale up
+ // linearly from there - but cap at the physical core count.
+ // For common resolutions this results in:
+ // 1 for 360p
+ // 2 for 720p
+ // 4 for 1080p
+ // 8 for 1440p
+ // 18 for 4K
+ int num_threads = std::max(
+ 1, 2 * resolution.Width() * resolution.Height() / (1280 * 720));
+ cfg.threads = std::min(settings.number_of_cores(), num_threads);
+ }
+#endif
+
+ current_settings_ = settings;
+
+ vpx_codec_flags_t flags = 0;
+ if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) {
+ return false;
+ }
+
+ if (!libvpx_buffer_pool_.InitializeVpxUsePool(decoder_)) {
+ return false;
+ }
+
+ inited_ = true;
+ // Always start with a complete key frame.
+ key_frame_required_ = true;
+ if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
+ if (!libvpx_buffer_pool_.Resize(*buffer_pool_size)) {
+ return false;
+ }
+ }
+
+ vpx_codec_err_t status =
+ vpx_codec_control(decoder_, VP9D_SET_LOOP_FILTER_OPT, 1);
+ if (status != VPX_CODEC_OK) {
+ RTC_LOG(LS_ERROR) << "Failed to enable VP9D_SET_LOOP_FILTER_OPT. "
+ << vpx_codec_error(decoder_);
+ return false;
+ }
+
+ return true;
+}
+
+int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (decode_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(
+ rtc::MakeArrayView(input_image.data(), input_image.size()));
+ if (frame_info) {
+ RenderResolution frame_resolution(frame_info->frame_width,
+ frame_info->frame_height);
+ if (frame_resolution != current_settings_.max_render_resolution()) {
+ // Resolution has changed, tear down and re-init a new decoder in
+ // order to get correct sizing.
+ Release();
+ current_settings_.set_max_render_resolution(frame_resolution);
+ if (!Configure(current_settings_)) {
+ RTC_LOG(LS_WARNING) << "Failed to re-init decoder.";
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse VP9 header from key-frame.";
+ }
+ }
+
+ // Always start with a complete key frame.
+ if (key_frame_required_) {
+ if (input_image._frameType != VideoFrameType::kVideoFrameKey)
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ key_frame_required_ = false;
+ }
+ vpx_codec_iter_t iter = nullptr;
+ vpx_image_t* img;
+ const uint8_t* buffer = input_image.data();
+ if (input_image.size() == 0) {
+ buffer = nullptr; // Triggers full frame concealment.
+ }
+ // During decode libvpx may get and release buffers from
+ // `libvpx_buffer_pool_`. In practice libvpx keeps a few (~3-4) buffers alive
+ // at a time.
+ if (vpx_codec_decode(decoder_, buffer,
+ static_cast<unsigned int>(input_image.size()), 0,
+ VPX_DL_REALTIME)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ // `img->fb_priv` contains the image data, a reference counted Vp9FrameBuffer.
+ // It may be released by libvpx during future vpx_codec_decode or
+ // vpx_codec_destroy calls.
+ img = vpx_codec_get_frame(decoder_, &iter);
+ int qp;
+ vpx_codec_err_t vpx_ret =
+ vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
+ RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
+ int ret =
+ ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
+ if (ret != 0) {
+ return ret;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Decoder::ReturnFrame(
+ const vpx_image_t* img,
+ uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space) {
+ if (img == nullptr) {
+ // Decoder OK and nullptr image => No show frame.
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ // This buffer contains all of `img`'s image data, a reference counted
+ // Vp9FrameBuffer. (libvpx is done with the buffers after a few
+ // vpx_codec_decode calls or vpx_codec_destroy).
+ rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> img_buffer(
+ static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv));
+
+ // The buffer can be used directly by the VideoFrame (without copy) by
+ // using a Wrapped*Buffer.
+ rtc::scoped_refptr<VideoFrameBuffer> img_wrapped_buffer;
+ switch (img->fmt) {
+ case VPX_IMG_FMT_I420:
+ img_wrapped_buffer = WrapI420Buffer(
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
+ // WrappedI420Buffer's mechanism for allowing the release of its
+ // frame buffer is through a callback function. This is where we
+ // should release `img_buffer`.
+ [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I422:
+ img_wrapped_buffer = WrapI422Buffer(
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
+ // WrappedI422Buffer's mechanism for allowing the release of its
+ // frame buffer is through a callback function. This is where we
+ // should release `img_buffer`.
+ [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I444:
+ img_wrapped_buffer = WrapI444Buffer(
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
+ // WrappedI444Buffer's mechanism for allowing the release of its
+ // frame buffer is through a callback function. This is where we
+ // should release `img_buffer`.
+ [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I42016:
+ img_wrapped_buffer = WrapI010Buffer(
+ img->d_w, img->d_h,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_Y]),
+ img->stride[VPX_PLANE_Y] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_U]),
+ img->stride[VPX_PLANE_U] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_V]),
+ img->stride[VPX_PLANE_V] / 2, [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I42216:
+ img_wrapped_buffer = WrapI210Buffer(
+ img->d_w, img->d_h,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_Y]),
+ img->stride[VPX_PLANE_Y] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_U]),
+ img->stride[VPX_PLANE_U] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_V]),
+ img->stride[VPX_PLANE_V] / 2, [img_buffer] {});
+ break;
+ case VPX_IMG_FMT_I44416:
+ img_wrapped_buffer = WrapI410Buffer(
+ img->d_w, img->d_h,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_Y]),
+ img->stride[VPX_PLANE_Y] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_U]),
+ img->stride[VPX_PLANE_U] / 2,
+ reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_V]),
+ img->stride[VPX_PLANE_V] / 2, [img_buffer] {});
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << "Unsupported pixel format produced by the decoder: "
+ << static_cast<int>(img->fmt);
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ }
+
+ auto builder = VideoFrame::Builder()
+ .set_video_frame_buffer(img_wrapped_buffer)
+ .set_timestamp_rtp(timestamp);
+ if (explicit_color_space) {
+ builder.set_color_space(*explicit_color_space);
+ } else {
+ builder.set_color_space(
+ ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
+ }
+ VideoFrame decoded_image = builder.build();
+
+ decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Decoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ decode_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Decoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ if (decoder_ != nullptr) {
+ if (inited_) {
+ // When a codec is destroyed libvpx will release any buffers of
+ // `libvpx_buffer_pool_` it is currently using.
+ if (vpx_codec_destroy(decoder_)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ delete decoder_;
+ decoder_ = nullptr;
+ }
+ // Releases buffers from the pool. Any buffers not in use are deleted.
+ // Buffers still referenced externally are deleted once fully released,
+ // rather than being returned to the pool.
+ libvpx_buffer_pool_.ClearPool();
+ inited_ = false;
+ return ret_val;
+}
+
+VideoDecoder::DecoderInfo LibvpxVp9Decoder::GetDecoderInfo() const {
+ DecoderInfo info;
+ info.implementation_name = "libvpx";
+ info.is_hardware_accelerated = false;
+ return info;
+}
+
+const char* LibvpxVp9Decoder::ImplementationName() const {
+ return "libvpx";
+}
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h
new file mode 100644
index 0000000000..65fc553b82
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_
+
+#ifdef RTC_ENABLE_VP9
+
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+#include "vpx/vp8cx.h"
+
+namespace webrtc {
+
+class LibvpxVp9Decoder : public VP9Decoder {
+ public:
+ LibvpxVp9Decoder();
+ virtual ~LibvpxVp9Decoder();
+
+ bool Configure(const Settings& settings) override;
+
+ int Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t /*render_time_ms*/) override;
+
+ int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
+
+ int Release() override;
+
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ private:
+ int ReturnFrame(const vpx_image_t* img,
+ uint32_t timestamp,
+ int qp,
+ const webrtc::ColorSpace* explicit_color_space);
+
+ // Memory pool used to share buffers between libvpx and webrtc.
+ Vp9FrameBufferPool libvpx_buffer_pool_;
+ DecodedImageCallback* decode_complete_callback_;
+ bool inited_;
+ vpx_codec_ctx_t* decoder_;
+ bool key_frame_required_;
+ Settings current_settings_;
+};
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
new file mode 100644
index 0000000000..5877373b76
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
@@ -0,0 +1,2194 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include <memory>
+#ifdef RTC_ENABLE_VP9
+
+#include <algorithm>
+#include <limits>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/video/color_space.h"
+#include "api/video/i010_buffer.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_list.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+
+namespace webrtc {
+
+namespace {
+// Maps from gof_idx to the encoder's internal reference frame buffer index.
+// These maps work for 1, 2, and 3 temporal layers with GOF lengths of 1, 2,
+// and 4 frames, respectively.
+uint8_t kRefBufIdx[4] = {0, 0, 0, 1};
+uint8_t kUpdBufIdx[4] = {0, 0, 1, 0};
+
+// Maximum allowed PID difference for the different per-layer frame-rate case.
+const int kMaxAllowedPidDiff = 30;
+
+// TODO(ilink): Tune these thresholds further.
+// Selected using ConverenceMotion_1280_720_50.yuv clip.
+// No toggling observed on any link capacity from 100-2000kbps.
+// HD was reached consistently when link capacity was 1500kbps.
+// The resolutions set here are a bit more conservative than those set by
+// svc_config.cc, e.g. at 300kbps the resolution converged to 270p instead of
+// 360p.
+constexpr int kLowVp9QpThreshold = 149;
+constexpr int kHighVp9QpThreshold = 205;
+
+std::pair<size_t, size_t> GetActiveLayers(
+ const VideoBitrateAllocation& allocation) {
+ for (size_t sl_idx = 0; sl_idx < kMaxSpatialLayers; ++sl_idx) {
+ if (allocation.GetSpatialLayerSum(sl_idx) > 0) {
+ size_t last_layer = sl_idx + 1;
+ while (last_layer < kMaxSpatialLayers &&
+ allocation.GetSpatialLayerSum(last_layer) > 0) {
+ ++last_layer;
+ }
+ return std::make_pair(sl_idx, last_layer);
+ }
+ }
+ return {0, 0};
+}
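+
+// Illustrative example, not part of the original change: if only spatial
+// layers 1 and 2 carry a non-zero bitrate, GetActiveLayers() returns {1, 3},
+// i.e. the half-open index range [first_active, last_active + 1).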
+
+using Vp9ScalabilityStructure =
+ std::tuple<std::unique_ptr<ScalableVideoController>, ScalabilityMode>;
+absl::optional<Vp9ScalabilityStructure> CreateVp9ScalabilityStructure(
+ const VideoCodec& codec) {
+ int num_spatial_layers = codec.VP9().numberOfSpatialLayers;
+ int num_temporal_layers =
+ std::max(1, int{codec.VP9().numberOfTemporalLayers});
+ if (num_spatial_layers == 1 && num_temporal_layers == 1) {
+ return absl::make_optional<Vp9ScalabilityStructure>(
+ std::make_unique<ScalableVideoControllerNoLayering>(),
+ ScalabilityMode::kL1T1);
+ }
+
+ char name[20];
+ rtc::SimpleStringBuilder ss(name);
+ if (codec.mode == VideoCodecMode::kScreensharing) {
+ // TODO(bugs.webrtc.org/11999): Compose names of the structures when they
+ // are implemented.
+ return absl::nullopt;
+ } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOn ||
+ num_spatial_layers == 1) {
+ ss << "L" << num_spatial_layers << "T" << num_temporal_layers;
+ } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOnKeyPic) {
+ ss << "L" << num_spatial_layers << "T" << num_temporal_layers << "_KEY";
+ } else {
+ RTC_DCHECK_EQ(codec.VP9().interLayerPred, InterLayerPredMode::kOff);
+ ss << "S" << num_spatial_layers << "T" << num_temporal_layers;
+ }
+
+ // Check spatial ratio.
+ if (num_spatial_layers > 1 && codec.spatialLayers[0].targetBitrate > 0) {
+ if (codec.width != codec.spatialLayers[num_spatial_layers - 1].width ||
+ codec.height != codec.spatialLayers[num_spatial_layers - 1].height) {
+ RTC_LOG(LS_WARNING)
+ << "Top layer resolution expected to match overall resolution";
+ return absl::nullopt;
+ }
+ // Check if the ratio is one of the supported ones.
+ int numerator;
+ int denominator;
+ if (codec.spatialLayers[1].width == 2 * codec.spatialLayers[0].width) {
+ numerator = 1;
+ denominator = 2;
+ // no suffix for 1:2 ratio.
+ } else if (2 * codec.spatialLayers[1].width ==
+ 3 * codec.spatialLayers[0].width) {
+ numerator = 2;
+ denominator = 3;
+ ss << "h";
+ } else {
+ RTC_LOG(LS_WARNING) << "Unsupported scalability ratio "
+ << codec.spatialLayers[0].width << ":"
+ << codec.spatialLayers[1].width;
+ return absl::nullopt;
+ }
+ // Validate ratio is consistent for all spatial layer transitions.
+ for (int sid = 1; sid < num_spatial_layers; ++sid) {
+ if (codec.spatialLayers[sid].width * numerator !=
+ codec.spatialLayers[sid - 1].width * denominator ||
+ codec.spatialLayers[sid].height * numerator !=
+ codec.spatialLayers[sid - 1].height * denominator) {
+ RTC_LOG(LS_WARNING) << "Inconsistent scalability ratio " << numerator
+ << ":" << denominator;
+ return absl::nullopt;
+ }
+ }
+ }
+
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(name);
+ if (!scalability_mode.has_value()) {
+ RTC_LOG(LS_WARNING) << "Invalid scalability mode " << name;
+ return absl::nullopt;
+ }
+ auto scalability_structure_controller =
+ CreateScalabilityStructure(*scalability_mode);
+ if (scalability_structure_controller == nullptr) {
+ RTC_LOG(LS_WARNING) << "Unsupported scalability structure " << name;
+ } else {
+ RTC_LOG(LS_INFO) << "Created scalability structure " << name;
+ }
+ return absl::make_optional<Vp9ScalabilityStructure>(
+ std::move(scalability_structure_controller), *scalability_mode);
+}
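+
+// Illustrative examples, not part of the original change, of the names built
+// above: 3 spatial and 2 temporal layers yield "L3T2" for
+// InterLayerPredMode::kOn, "L3T2_KEY" for kOnKeyPic and "S3T2" for kOff; a
+// 2:3 spatial ratio appends "h", e.g. "L2T2" becomes "L2T2h".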
+
+vpx_svc_ref_frame_config_t Vp9References(
+ rtc::ArrayView<const ScalableVideoController::LayerFrameConfig> layers) {
+ vpx_svc_ref_frame_config_t ref_config = {};
+ for (const ScalableVideoController::LayerFrameConfig& layer_frame : layers) {
+ const auto& buffers = layer_frame.Buffers();
+ RTC_DCHECK_LE(buffers.size(), 3);
+ int sid = layer_frame.SpatialId();
+ if (!buffers.empty()) {
+ ref_config.lst_fb_idx[sid] = buffers[0].id;
+ ref_config.reference_last[sid] = buffers[0].referenced;
+ if (buffers[0].updated) {
+ ref_config.update_buffer_slot[sid] |= (1 << buffers[0].id);
+ }
+ }
+ if (buffers.size() > 1) {
+ ref_config.gld_fb_idx[sid] = buffers[1].id;
+ ref_config.reference_golden[sid] = buffers[1].referenced;
+ if (buffers[1].updated) {
+ ref_config.update_buffer_slot[sid] |= (1 << buffers[1].id);
+ }
+ }
+ if (buffers.size() > 2) {
+ ref_config.alt_fb_idx[sid] = buffers[2].id;
+ ref_config.reference_alt_ref[sid] = buffers[2].referenced;
+ if (buffers[2].updated) {
+ ref_config.update_buffer_slot[sid] |= (1 << buffers[2].id);
+ }
+ }
+ }
+ // TODO(bugs.webrtc.org/11999): Fill ref_config.duration
+ return ref_config;
+}
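+
+// Illustrative example, not part of the original change: a layer frame on
+// spatial layer 0 whose first buffer is {id: 1, referenced: true, updated:
+// true} yields lst_fb_idx[0] = 1, reference_last[0] = 1, and bit 1 set in
+// update_buffer_slot[0].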
+
+bool AllowDenoising() {
+ // Do not enable the denoiser on ARM, since optimization is pending.
+ // The denoiser is on by default on other platforms.
+#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \
+ !defined(ANDROID)
+ return true;
+#else
+ return false;
+#endif
+}
+
+} // namespace
+
+void LibvpxVp9Encoder::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+ void* user_data) {
+ LibvpxVp9Encoder* enc = static_cast<LibvpxVp9Encoder*>(user_data);
+ enc->GetEncodedLayerFrame(pkt);
+}
+
+LibvpxVp9Encoder::LibvpxVp9Encoder(const cricket::VideoCodec& codec,
+ std::unique_ptr<LibvpxInterface> interface,
+ const FieldTrialsView& trials)
+ : libvpx_(std::move(interface)),
+ encoded_image_(),
+ encoded_complete_callback_(nullptr),
+ profile_(
+ ParseSdpForVP9Profile(codec.params).value_or(VP9Profile::kProfile0)),
+ inited_(false),
+ timestamp_(0),
+ rc_max_intra_target_(0),
+ encoder_(nullptr),
+ config_(nullptr),
+ raw_(nullptr),
+ input_image_(nullptr),
+ force_key_frame_(true),
+ pics_since_key_(0),
+ num_temporal_layers_(0),
+ num_spatial_layers_(0),
+ num_active_spatial_layers_(0),
+ first_active_layer_(0),
+ layer_deactivation_requires_key_frame_(absl::StartsWith(
+ trials.Lookup("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation"),
+ "Enabled")),
+ is_svc_(false),
+ inter_layer_pred_(InterLayerPredMode::kOn),
+ external_ref_control_(false), // Set in InitEncode because of tests.
+ trusted_rate_controller_(
+ RateControlSettings::ParseFromKeyValueConfig(&trials)
+ .LibvpxVp9TrustedRateController()),
+ layer_buffering_(false),
+ full_superframe_drop_(true),
+ first_frame_in_picture_(true),
+ ss_info_needed_(false),
+ force_all_active_layers_(false),
+ num_cores_(0),
+ is_flexible_mode_(false),
+ variable_framerate_experiment_(ParseVariableFramerateConfig(trials)),
+ variable_framerate_controller_(
+ variable_framerate_experiment_.framerate_limit),
+ quality_scaler_experiment_(ParseQualityScalerConfig(trials)),
+ external_ref_ctrl_(
+ !absl::StartsWith(trials.Lookup("WebRTC-Vp9ExternalRefCtrl"),
+ "Disabled")),
+ performance_flags_(ParsePerformanceFlagsFromTrials(trials)),
+ num_steady_state_frames_(0),
+ config_changed_(true) {
+ codec_ = {};
+ memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
+}
+
+LibvpxVp9Encoder::~LibvpxVp9Encoder() {
+ Release();
+}
+
+void LibvpxVp9Encoder::SetFecControllerOverride(FecControllerOverride*) {
+ // Ignored.
+}
+
+int LibvpxVp9Encoder::Release() {
+ int ret_val = WEBRTC_VIDEO_CODEC_OK;
+
+ if (encoder_ != nullptr) {
+ if (inited_) {
+ if (libvpx_->codec_destroy(encoder_)) {
+ ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+ }
+ delete encoder_;
+ encoder_ = nullptr;
+ }
+ if (config_ != nullptr) {
+ delete config_;
+ config_ = nullptr;
+ }
+ if (raw_ != nullptr) {
+ libvpx_->img_free(raw_);
+ raw_ = nullptr;
+ }
+ inited_ = false;
+ return ret_val;
+}
+
+bool LibvpxVp9Encoder::ExplicitlyConfiguredSpatialLayers() const {
+ // We check target_bitrate_bps of the 0th layer to see if the spatial layers
+ // (i.e. bitrates) were explicitly configured.
+ return codec_.spatialLayers[0].targetBitrate > 0;
+}
+
+bool LibvpxVp9Encoder::SetSvcRates(
+ const VideoBitrateAllocation& bitrate_allocation) {
+ std::pair<size_t, size_t> current_layers =
+ GetActiveLayers(current_bitrate_allocation_);
+ std::pair<size_t, size_t> new_layers = GetActiveLayers(bitrate_allocation);
+
+ const bool layer_activation_requires_key_frame =
+ inter_layer_pred_ == InterLayerPredMode::kOff ||
+ inter_layer_pred_ == InterLayerPredMode::kOnKeyPic;
+ const bool lower_layers_enabled = new_layers.first < current_layers.first;
+ const bool higher_layers_enabled = new_layers.second > current_layers.second;
+ const bool disabled_layers = new_layers.first > current_layers.first ||
+ new_layers.second < current_layers.second;
+
+ if (lower_layers_enabled ||
+ (higher_layers_enabled && layer_activation_requires_key_frame) ||
+ (disabled_layers && layer_deactivation_requires_key_frame_)) {
+ force_key_frame_ = true;
+ }
+
+ if (current_layers != new_layers) {
+ ss_info_needed_ = true;
+ }
+
+ config_->rc_target_bitrate = bitrate_allocation.get_sum_kbps();
+
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
+ const bool was_layer_active = (config_->ss_target_bitrate[sl_idx] > 0);
+ config_->ss_target_bitrate[sl_idx] =
+ bitrate_allocation.GetSpatialLayerSum(sl_idx) / 1000;
+
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers_; ++tl_idx) {
+ config_->layer_target_bitrate[sl_idx * num_temporal_layers_ + tl_idx] =
+ bitrate_allocation.GetTemporalLayerSum(sl_idx, tl_idx) / 1000;
+ }
+
+ if (!was_layer_active) {
+ // Reset frame rate controller if layer is resumed after pause.
+ framerate_controller_[sl_idx].Reset();
+ }
+
+ framerate_controller_[sl_idx].SetTargetRate(
+ codec_.spatialLayers[sl_idx].maxFramerate);
+ }
+ } else {
+ float rate_ratio[VPX_MAX_LAYERS] = {0};
+ float total = 0;
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ if (svc_params_.scaling_factor_num[i] <= 0 ||
+ svc_params_.scaling_factor_den[i] <= 0) {
+ RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
+ return false;
+ }
+ rate_ratio[i] = static_cast<float>(svc_params_.scaling_factor_num[i]) /
+ svc_params_.scaling_factor_den[i];
+ total += rate_ratio[i];
+ }
+
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ RTC_CHECK_GT(total, 0);
+ config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+ config_->rc_target_bitrate * rate_ratio[i] / total);
+ if (num_temporal_layers_ == 1) {
+ config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 2) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] * 2 / 3;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 3) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] / 2;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->layer_target_bitrate[i * num_temporal_layers_] +
+ (config_->ss_target_bitrate[i] / 4);
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+ config_->ss_target_bitrate[i];
+ } else {
+ RTC_LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+ << num_temporal_layers_;
+ return false;
+ }
+
+ framerate_controller_[i].SetTargetRate(codec_.maxFramerate);
+ }
+ }
+
+ num_active_spatial_layers_ = 0;
+ first_active_layer_ = 0;
+ bool seen_active_layer = false;
+ bool expect_no_more_active_layers = false;
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ if (config_->ss_target_bitrate[i] > 0) {
+ RTC_DCHECK(!expect_no_more_active_layers) << "Only middle layer is "
+ "deactivated.";
+ if (!seen_active_layer) {
+ first_active_layer_ = i;
+ }
+ num_active_spatial_layers_ = i + 1;
+ seen_active_layer = true;
+ } else {
+ expect_no_more_active_layers = seen_active_layer;
+ }
+ }
+
+ if (seen_active_layer && performance_flags_.use_per_layer_speed) {
+ bool denoiser_on =
+ AllowDenoising() && codec_.VP9()->denoisingOn &&
+ performance_flags_by_spatial_index_[num_active_spatial_layers_ - 1]
+ .allow_denoising;
+ libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
+ denoiser_on ? 1 : 0);
+ }
+
+ if (higher_layers_enabled && !force_key_frame_) {
+ // Prohibit dropping of all layers for the next frame, so that the newly
+ // enabled layer will have a valid spatial reference.
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = 0;
+ }
+ force_all_active_layers_ = true;
+ }
+
+ if (svc_controller_) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ // Bitrates in `layer_target_bitrate` are accumulated for each temporal
+ // layer but in `VideoBitrateAllocation` they should be separated.
+ int previous_bitrate_kbps = 0;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ int accumulated_bitrate_kbps =
+ config_->layer_target_bitrate[sid * num_temporal_layers_ + tid];
+ int single_layer_bitrate_kbps =
+ accumulated_bitrate_kbps - previous_bitrate_kbps;
+ RTC_DCHECK_GE(single_layer_bitrate_kbps, 0);
+ current_bitrate_allocation_.SetBitrate(
+ sid, tid, single_layer_bitrate_kbps * 1'000);
+ previous_bitrate_kbps = accumulated_bitrate_kbps;
+ }
+ }
+ svc_controller_->OnRatesUpdated(current_bitrate_allocation_);
+ } else {
+ current_bitrate_allocation_ = bitrate_allocation;
+ }
+ config_changed_ = true;
+ return true;
+}
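+
+// Illustrative example, not part of the original change, of the temporal
+// split above: with three temporal layers and ss_target_bitrate[i] = 900
+// kbps, the cumulative layer targets become 450 (TL0), 675 (TL0+TL1) and
+// 900 kbps (all layers).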
+
+void LibvpxVp9Encoder::DisableSpatialLayer(int sid) {
+ RTC_DCHECK_LT(sid, num_spatial_layers_);
+ if (config_->ss_target_bitrate[sid] == 0) {
+ return;
+ }
+ config_->ss_target_bitrate[sid] = 0;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ config_->layer_target_bitrate[sid * num_temporal_layers_ + tid] = 0;
+ }
+ config_changed_ = true;
+}
+
+void LibvpxVp9Encoder::EnableSpatialLayer(int sid) {
+ RTC_DCHECK_LT(sid, num_spatial_layers_);
+ if (config_->ss_target_bitrate[sid] > 0) {
+ return;
+ }
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ config_->layer_target_bitrate[sid * num_temporal_layers_ + tid] =
+ current_bitrate_allocation_.GetTemporalLayerSum(sid, tid) / 1000;
+ }
+ config_->ss_target_bitrate[sid] =
+ current_bitrate_allocation_.GetSpatialLayerSum(sid) / 1000;
+ RTC_DCHECK_GT(config_->ss_target_bitrate[sid], 0);
+ config_changed_ = true;
+}
+
+void LibvpxVp9Encoder::SetActiveSpatialLayers() {
+ // The SVC controller may decide to skip a frame at a certain spatial layer
+ // even when its bitrate is non-zero; libvpx, however, uses the configured
+ // bitrate as the signal for which layers should be produced.
+ RTC_DCHECK(svc_controller_);
+ RTC_DCHECK(!layer_frames_.empty());
+ RTC_DCHECK(absl::c_is_sorted(
+ layer_frames_, [](const ScalableVideoController::LayerFrameConfig& lhs,
+ const ScalableVideoController::LayerFrameConfig& rhs) {
+ return lhs.SpatialId() < rhs.SpatialId();
+ }));
+
+ auto frame_it = layer_frames_.begin();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (frame_it != layer_frames_.end() && frame_it->SpatialId() == sid) {
+ EnableSpatialLayer(sid);
+ ++frame_it;
+ } else {
+ DisableSpatialLayer(sid);
+ }
+ }
+}
+
+void LibvpxVp9Encoder::SetRates(const RateControlParameters& parameters) {
+ if (!inited_) {
+ RTC_LOG(LS_WARNING) << "SetRates() called while uninitialized.";
+ return;
+ }
+ if (encoder_->err) {
+ RTC_LOG(LS_WARNING) << "Encoder in error state: " << encoder_->err;
+ return;
+ }
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Unsupported framerate: "
+ << parameters.framerate_fps;
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+
+ bool res = SetSvcRates(parameters.bitrate);
+ RTC_DCHECK(res) << "Failed to set new bitrate allocation";
+ config_changed_ = true;
+}
+
+// TODO(eladalon): s/inst/codec_settings/g.
+int LibvpxVp9Encoder::InitEncode(const VideoCodec* inst,
+ const Settings& settings) {
+ if (inst == nullptr) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // Allow zero to represent an unspecified maxBitRate
+ if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->width < 1 || inst->height < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (inst->VP9().numberOfTemporalLayers > 3) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // libvpx probably does not support more than 3 spatial layers.
+ if (inst->VP9().numberOfSpatialLayers > 3) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ absl::optional<vpx_img_fmt_t> previous_img_fmt =
+ raw_ ? absl::make_optional<vpx_img_fmt_t>(raw_->fmt) : absl::nullopt;
+
+ int ret_val = Release();
+ if (ret_val < 0) {
+ return ret_val;
+ }
+ if (encoder_ == nullptr) {
+ encoder_ = new vpx_codec_ctx_t;
+ memset(encoder_, 0, sizeof(*encoder_));
+ }
+ if (config_ == nullptr) {
+ config_ = new vpx_codec_enc_cfg_t;
+ memset(config_, 0, sizeof(*config_));
+ }
+ timestamp_ = 0;
+ if (&codec_ != inst) {
+ codec_ = *inst;
+ }
+ memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
+
+ force_key_frame_ = true;
+ pics_since_key_ = 0;
+ num_cores_ = settings.number_of_cores;
+
+ scalability_mode_ = inst->GetScalabilityMode();
+ if (scalability_mode_.has_value()) {
+ // Use settings from `ScalabilityMode` identifier.
+ RTC_LOG(LS_INFO) << "Create scalability structure "
+ << ScalabilityModeToString(*scalability_mode_);
+ svc_controller_ = CreateScalabilityStructure(*scalability_mode_);
+ if (!svc_controller_) {
+ RTC_LOG(LS_WARNING) << "Failed to create scalability structure.";
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ ScalableVideoController::StreamLayersConfig info =
+ svc_controller_->StreamConfig();
+ num_spatial_layers_ = info.num_spatial_layers;
+ num_temporal_layers_ = info.num_temporal_layers;
+ inter_layer_pred_ = ScalabilityModeToInterLayerPredMode(*scalability_mode_);
+ } else {
+ num_spatial_layers_ = inst->VP9().numberOfSpatialLayers;
+ RTC_DCHECK_GT(num_spatial_layers_, 0);
+ num_temporal_layers_ = inst->VP9().numberOfTemporalLayers;
+ if (num_temporal_layers_ == 0) {
+ num_temporal_layers_ = 1;
+ }
+ inter_layer_pred_ = inst->VP9().interLayerPred;
+ auto vp9_scalability = CreateVp9ScalabilityStructure(*inst);
+ if (vp9_scalability.has_value()) {
+ std::tie(svc_controller_, scalability_mode_) =
+ std::move(vp9_scalability.value());
+ } else {
+ svc_controller_ = nullptr;
+ scalability_mode_ = absl::nullopt;
+ }
+ }
+
+ framerate_controller_ = std::vector<FramerateControllerDeprecated>(
+ num_spatial_layers_, FramerateControllerDeprecated(codec_.maxFramerate));
+
+ is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);
+
+ // Populate encoder configuration with default values.
+ if (libvpx_->codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ vpx_img_fmt img_fmt = VPX_IMG_FMT_NONE;
+ unsigned int bits_for_storage = 8;
+ switch (profile_) {
+ case VP9Profile::kProfile0:
+ img_fmt = previous_img_fmt.value_or(VPX_IMG_FMT_I420);
+ bits_for_storage = 8;
+ config_->g_bit_depth = VPX_BITS_8;
+ config_->g_profile = 0;
+ config_->g_input_bit_depth = 8;
+ break;
+ case VP9Profile::kProfile1:
+ // Encoding of profile 1 is not implemented. It would require extended
+ // support for I444, I422, and I440 buffers.
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case VP9Profile::kProfile2:
+ img_fmt = VPX_IMG_FMT_I42016;
+ bits_for_storage = 16;
+ config_->g_bit_depth = VPX_BITS_10;
+ config_->g_profile = 2;
+ config_->g_input_bit_depth = 10;
+ break;
+ case VP9Profile::kProfile3:
+ // Encoding of profile 3 is not implemented.
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ // Create a wrapper for the image, setting the image data to nullptr. The
+ // actual pointer will be set in Encode(). Align is set to 1, as it is
+ // meaningless here (no actual memory is allocated).
+ raw_ = libvpx_->img_wrap(nullptr, img_fmt, codec_.width, codec_.height, 1,
+ nullptr);
+ raw_->bit_depth = bits_for_storage;
+
+ config_->g_w = codec_.width;
+ config_->g_h = codec_.height;
+ config_->rc_target_bitrate = inst->startBitrate; // in kbit/s
+ config_->g_error_resilient = is_svc_ ? VPX_ERROR_RESILIENT_DEFAULT : 0;
+ // Setting the time base of the codec.
+ config_->g_timebase.num = 1;
+ config_->g_timebase.den = 90000;
+ config_->g_lag_in_frames = 0; // 0 = no frame lagging.
+ config_->g_threads = 1;
+ // Rate control settings.
+ config_->rc_dropframe_thresh = inst->GetFrameDropEnabled() ? 30 : 0;
+ config_->rc_end_usage = VPX_CBR;
+ config_->g_pass = VPX_RC_ONE_PASS;
+ config_->rc_min_quantizer =
+ codec_.mode == VideoCodecMode::kScreensharing ? 8 : 2;
+ config_->rc_max_quantizer = 52;
+ config_->rc_undershoot_pct = 50;
+ config_->rc_overshoot_pct = 50;
+ config_->rc_buf_initial_sz = 500;
+ config_->rc_buf_optimal_sz = 600;
+ config_->rc_buf_sz = 1000;
+ // Set the maximum target size of any key-frame.
+ rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
+ // Key-frame interval is enforced manually by this wrapper.
+ config_->kf_mode = VPX_KF_DISABLED;
+ // TODO(webm:1592): work-around for libvpx issue, as it can still
+ // put some key-frames at will even in VPX_KF_DISABLED kf_mode.
+ config_->kf_max_dist = inst->VP9().keyFrameInterval;
+ config_->kf_min_dist = config_->kf_max_dist;
+ if (quality_scaler_experiment_.enabled) {
+ // In this experiment the WebRTC-wide quality scaler is used instead of the
+ // libvpx-internal scaler.
+ config_->rc_resize_allowed = 0;
+ } else {
+ config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0;
+ }
+ // Determine number of threads based on the image size and #cores.
+ config_->g_threads =
+ NumberOfThreads(config_->g_w, config_->g_h, settings.number_of_cores);
+
+ is_flexible_mode_ = inst->VP9().flexibleMode;
+
+ if (num_spatial_layers_ > 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing && !is_flexible_mode_) {
+ RTC_LOG(LS_ERROR) << "Flexible mode is required for screenshare with "
+ "several spatial layers";
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // External reference control is required for different frame rates on the
+ // spatial layers, because libvpx generates RTP-incompatible references in
+ // this case.
+ external_ref_control_ = external_ref_ctrl_ ||
+ (num_spatial_layers_ > 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing) ||
+ inter_layer_pred_ == InterLayerPredMode::kOn;
+
+ if (num_temporal_layers_ == 1) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode1);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
+ config_->ts_number_layers = 1;
+ config_->ts_rate_decimator[0] = 1;
+ config_->ts_periodicity = 1;
+ config_->ts_layer_id[0] = 0;
+ } else if (num_temporal_layers_ == 2) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode2);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101;
+ config_->ts_number_layers = 2;
+ config_->ts_rate_decimator[0] = 2;
+ config_->ts_rate_decimator[1] = 1;
+ config_->ts_periodicity = 2;
+ config_->ts_layer_id[0] = 0;
+ config_->ts_layer_id[1] = 1;
+ } else if (num_temporal_layers_ == 3) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode3);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0212;
+ config_->ts_number_layers = 3;
+ config_->ts_rate_decimator[0] = 4;
+ config_->ts_rate_decimator[1] = 2;
+ config_->ts_rate_decimator[2] = 1;
+ config_->ts_periodicity = 4;
+ config_->ts_layer_id[0] = 0;
+ config_->ts_layer_id[1] = 2;
+ config_->ts_layer_id[2] = 1;
+ config_->ts_layer_id[3] = 2;
+ } else {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ if (external_ref_control_) {
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+ if (num_temporal_layers_ > 1 && num_spatial_layers_ > 1 &&
+ codec_.mode == VideoCodecMode::kScreensharing) {
+ // External reference control for several temporal layers with different
+ // frame rates on spatial layers is not implemented yet.
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ }
+ ref_buf_ = {};
+
+ return InitAndSetControlSettings(inst);
+}
+
+int LibvpxVp9Encoder::NumberOfThreads(int width,
+ int height,
+ int number_of_cores) {
+ // Keep the number of encoder threads equal to the possible number of column
+ // tiles, which is (1, 2, 4, 8). See comments below for VP9E_SET_TILE_COLUMNS.
+ if (width * height >= 1280 * 720 && number_of_cores > 4) {
+ return 4;
+ } else if (width * height >= 640 * 360 && number_of_cores > 2) {
+ return 2;
+ } else {
+// Use 2 threads for low res on ARM.
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
+ defined(WEBRTC_ANDROID)
+ if (width * height >= 320 * 180 && number_of_cores > 2) {
+ return 2;
+ }
+#endif
+ // 1 thread less than VGA.
+ return 1;
+ }
+}
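+
+// Illustrative example, not part of the original change: encoding 1280x720
+// on a machine with more than 4 cores selects 4 threads, which matches the
+// 4 column tiles configured via VP9E_SET_TILE_COLUMNS below (4 >> 1 == 2, in
+// log2 units).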
+
+int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) {
+ // Set QP-min/max per spatial and temporal layer.
+ int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
+ for (int i = 0; i < tot_num_layers; ++i) {
+ svc_params_.max_quantizers[i] = config_->rc_max_quantizer;
+ svc_params_.min_quantizers[i] = config_->rc_min_quantizer;
+ }
+ config_->ss_number_layers = num_spatial_layers_;
+ if (svc_controller_) {
+ auto stream_config = svc_controller_->StreamConfig();
+ for (int i = 0; i < stream_config.num_spatial_layers; ++i) {
+ svc_params_.scaling_factor_num[i] = stream_config.scaling_factor_num[i];
+ svc_params_.scaling_factor_den[i] = stream_config.scaling_factor_den[i];
+ }
+ } else if (ExplicitlyConfiguredSpatialLayers()) {
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ const auto& layer = codec_.spatialLayers[i];
+ RTC_CHECK_GT(layer.width, 0);
+ const int scale_factor = codec_.width / layer.width;
+ RTC_DCHECK_GT(scale_factor, 0);
+
+ // Ensure the scale factor is an integer.
+ if (scale_factor * layer.width != codec_.width) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // Ensure scale factor is the same in both dimensions.
+ if (scale_factor * layer.height != codec_.height) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ // Ensure scale factor is power of two.
+ const bool is_pow_of_two = (scale_factor & (scale_factor - 1)) == 0;
+ if (!is_pow_of_two) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ svc_params_.scaling_factor_num[i] = 1;
+ svc_params_.scaling_factor_den[i] = scale_factor;
+
+ RTC_DCHECK_GT(codec_.spatialLayers[i].maxFramerate, 0);
+ RTC_DCHECK_LE(codec_.spatialLayers[i].maxFramerate, codec_.maxFramerate);
+ if (i > 0) {
+ // The frame rate of a higher spatial layer is supposed to be equal to or
+ // higher than the frame rate of a lower spatial layer.
+ RTC_DCHECK_GE(codec_.spatialLayers[i].maxFramerate,
+ codec_.spatialLayers[i - 1].maxFramerate);
+ }
+ }
+ } else {
+ int scaling_factor_num = 256;
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // 1:2 scaling in each dimension.
+ svc_params_.scaling_factor_num[i] = scaling_factor_num;
+ svc_params_.scaling_factor_den[i] = 256;
+ }
+ }
+
+ UpdatePerformanceFlags();
+ RTC_DCHECK_EQ(performance_flags_by_spatial_index_.size(),
+ static_cast<size_t>(num_spatial_layers_));
+
+ SvcRateAllocator init_allocator(codec_);
+ current_bitrate_allocation_ =
+ init_allocator.Allocate(VideoBitrateAllocationParameters(
+ inst->startBitrate * 1000, inst->maxFramerate));
+ if (!SetSvcRates(current_bitrate_allocation_)) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ const vpx_codec_err_t rv = libvpx_->codec_enc_init(
+ encoder_, vpx_codec_vp9_cx(), config_,
+ config_->g_bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH);
+ if (rv != VPX_CODEC_OK) {
+ RTC_LOG(LS_ERROR) << "Init error: " << libvpx_->codec_err_to_string(rv);
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ if (performance_flags_.use_per_layer_speed) {
+ for (int si = 0; si < num_spatial_layers_; ++si) {
+ svc_params_.speed_per_layer[si] =
+ performance_flags_by_spatial_index_[si].base_layer_speed;
+ svc_params_.loopfilter_ctrl[si] =
+ performance_flags_by_spatial_index_[si].deblock_mode;
+ }
+ bool denoiser_on =
+ AllowDenoising() && inst->VP9().denoisingOn &&
+ performance_flags_by_spatial_index_[num_spatial_layers_ - 1]
+ .allow_denoising;
+ libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
+ denoiser_on ? 1 : 0);
+ }
+
+ libvpx_->codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target_);
+ libvpx_->codec_control(encoder_, VP9E_SET_AQ_MODE,
+ inst->VP9().adaptiveQpMode ? 3 : 0);
+
+ libvpx_->codec_control(encoder_, VP9E_SET_FRAME_PARALLEL_DECODING, 0);
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_GF_TEMPORAL_REF, 0);
+
+ if (is_svc_) {
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC, 1);
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ }
+ if (!is_svc_ || !performance_flags_.use_per_layer_speed) {
+ libvpx_->codec_control(
+ encoder_, VP8E_SET_CPUUSED,
+ performance_flags_by_spatial_index_.rbegin()->base_layer_speed);
+ }
+
+ if (num_spatial_layers_ > 1) {
+ switch (inter_layer_pred_) {
+ case InterLayerPredMode::kOn:
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 0);
+ break;
+ case InterLayerPredMode::kOff:
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 1);
+ break;
+ case InterLayerPredMode::kOnKeyPic:
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 2);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ memset(&svc_drop_frame_, 0, sizeof(svc_drop_frame_));
+ const bool reverse_constrained_drop_mode =
+ inter_layer_pred_ == InterLayerPredMode::kOn &&
+ codec_.mode == VideoCodecMode::kScreensharing &&
+ num_spatial_layers_ > 1;
+ if (reverse_constrained_drop_mode) {
+ // Screenshare dropping mode: drop a layer only together with all lower
+ // layers. This ensures that drops on lower layers won't reduce the frame
+ // rate for higher layers, and that the reference structure stays
+ // RTP-compatible.
+ svc_drop_frame_.framedrop_mode = CONSTRAINED_FROM_ABOVE_DROP;
+ svc_drop_frame_.max_consec_drop = 5;
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
+ }
+ // No buffering is needed because the highest layer is always present in
+ // all frames in CONSTRAINED_FROM_ABOVE drop mode.
+ layer_buffering_ = false;
+ } else {
+ // Configure encoder to drop entire superframe whenever it needs to drop
+ // a layer. This mode is preferred over per-layer dropping which causes
+ // quality flickering and is not compatible with RTP non-flexible mode.
+ svc_drop_frame_.framedrop_mode =
+ full_superframe_drop_ ? FULL_SUPERFRAME_DROP : CONSTRAINED_LAYER_DROP;
+ // Buffering is needed only for constrained layer drop, as it's not clear
+ // which frame is the last.
+ layer_buffering_ = !full_superframe_drop_;
+ svc_drop_frame_.max_consec_drop = std::numeric_limits<int>::max();
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
+ }
+ }
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
+ &svc_drop_frame_);
+ }
+
+ // Register callback for getting each spatial layer.
+ vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
+ LibvpxVp9Encoder::EncoderOutputCodedPacketCallback,
+ reinterpret_cast<void*>(this)};
+ libvpx_->codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
+ reinterpret_cast<void*>(&cbp));
+
+  // Control function to set the number of column tiles to use when encoding a
+  // frame, in log2 units: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4
+  // tile columns. The number of tile columns will be capped by the encoder
+  // based on image size (the minimum width of a tile column is 256 pixels,
+  // the maximum is 4096).
+ libvpx_->codec_control(encoder_, VP9E_SET_TILE_COLUMNS,
+ static_cast<int>((config_->g_threads >> 1)));
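+  // For example (illustrative): with g_threads = 4, the value 4 >> 1 = 2 is
+  // passed, requesting 2^2 = 4 tile columns, subject to the caps above.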
+
+ // Turn on row-based multithreading.
+ libvpx_->codec_control(encoder_, VP9E_SET_ROW_MT, 1);
+
+ if (AllowDenoising() && !performance_flags_.use_per_layer_speed) {
+ libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
+ inst->VP9().denoisingOn ? 1 : 0);
+ }
+
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ // Adjust internal parameters to screen content.
+ libvpx_->codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1);
+ }
+ // Enable encoder skip of static/low content blocks.
+ libvpx_->codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+ inited_ = true;
+ config_changed_ = true;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+uint32_t LibvpxVp9Encoder::MaxIntraTarget(uint32_t optimal_buffer_size) {
+  // Set the max to the optimal buffer level (normalized by the target
+  // bitrate), scaled by a scale_par.
+  // Max target size = scale_par * optimal_buffer_size * targetBR[Kbps].
+  // This value is presented as a percentage of perFrameBw:
+  // perFrameBw = targetBR[Kbps] * 1000 / framerate.
+  // The target in % is computed below:
+ float scale_par = 0.5;
+ uint32_t target_pct =
+ optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
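+  // For example (illustrative): optimal_buffer_size = 600 ms at 30 fps gives
+  // 600 * 0.5 * 30 / 10 = 900, i.e. a key frame may use up to 9x the
+  // per-frame bandwidth.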
+ // Don't go below 3 times the per frame bandwidth.
+ const uint32_t min_intra_size = 300;
+ return (target_pct < min_intra_size) ? min_intra_size : target_pct;
+}
+
+int LibvpxVp9Encoder::Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) {
+ if (!inited_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (encoded_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (num_active_spatial_layers_ == 0) {
+    // All spatial layers are disabled; return without encoding anything.
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ // We only support one stream at the moment.
+ if (frame_types && !frame_types->empty()) {
+ if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
+ force_key_frame_ = true;
+ }
+ }
+
+ if (pics_since_key_ + 1 ==
+ static_cast<size_t>(codec_.VP9()->keyFrameInterval)) {
+ force_key_frame_ = true;
+ }
+
+ if (svc_controller_) {
+ layer_frames_ = svc_controller_->NextFrameConfig(force_key_frame_);
+ if (layer_frames_.empty()) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ if (layer_frames_.front().IsKeyframe()) {
+ force_key_frame_ = true;
+ }
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ if (!force_key_frame_) {
+ const size_t gof_idx = (pics_since_key_ + 1) % gof_.num_frames_in_gof;
+ layer_id.temporal_layer_id = gof_.temporal_idx[gof_idx];
+
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ const uint32_t frame_timestamp_ms =
+ 1000 * input_image.timestamp() / kVideoPayloadTypeFrequency;
+
+ // To ensure that several rate-limiters with different limits don't
+ // interfere, they must be queried in order of increasing limit.
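+      // E.g. with per-layer limits of 5, 10 and 30 fps, the 5 fps limiter is
+      // queried first; once a limiter admits the frame, all higher-limit
+      // limiters would admit it too, so the loop below can stop early.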
+
+ bool use_steady_state_limiter =
+ variable_framerate_experiment_.enabled &&
+ input_image.update_rect().IsEmpty() &&
+ num_steady_state_frames_ >=
+ variable_framerate_experiment_.frames_before_steady_state;
+
+      // Need to check all frame limiters, even if lower layers are disabled,
+      // because the variable frame-rate limiter should be checked after the
+      // first layer. It's easier to overwrite the active layers afterwards
+      // than to check all cases here.
+ for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
+ const float layer_fps =
+ framerate_controller_[layer_id.spatial_layer_id].GetTargetRate();
+ // Use steady state rate-limiter at the correct place.
+ if (use_steady_state_limiter &&
+ layer_fps > variable_framerate_experiment_.framerate_limit - 1e-9) {
+ if (variable_framerate_controller_.DropFrame(frame_timestamp_ms)) {
+ layer_id.spatial_layer_id = num_active_spatial_layers_;
+ }
+ // Break always: if rate limiter triggered frame drop, no need to
+ // continue; otherwise, the rate is less than the next limiters.
+ break;
+ }
+ if (framerate_controller_[sl_idx].DropFrame(frame_timestamp_ms)) {
+ ++layer_id.spatial_layer_id;
+ } else {
+ break;
+ }
+ }
+
+ if (use_steady_state_limiter &&
+ layer_id.spatial_layer_id < num_active_spatial_layers_) {
+ variable_framerate_controller_.AddFrame(frame_timestamp_ms);
+ }
+ }
+
+ if (force_all_active_layers_) {
+ layer_id.spatial_layer_id = first_active_layer_;
+ force_all_active_layers_ = false;
+ }
+
+ RTC_DCHECK_LE(layer_id.spatial_layer_id, num_active_spatial_layers_);
+ if (layer_id.spatial_layer_id >= num_active_spatial_layers_) {
+ // Drop entire picture.
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ }
+
+ // Need to set temporal layer id on ALL layers, even disabled ones.
+ // Otherwise libvpx might produce frames on a disabled layer:
+ // http://crbug.com/1051476
+ for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
+ layer_id.temporal_layer_id_per_spatial[sl_idx] = layer_id.temporal_layer_id;
+ }
+
+ if (layer_id.spatial_layer_id < first_active_layer_) {
+ layer_id.spatial_layer_id = first_active_layer_;
+ }
+
+ if (svc_controller_) {
+ layer_id.spatial_layer_id = layer_frames_.front().SpatialId();
+ layer_id.temporal_layer_id = layer_frames_.front().TemporalId();
+ for (const auto& layer : layer_frames_) {
+ layer_id.temporal_layer_id_per_spatial[layer.SpatialId()] =
+ layer.TemporalId();
+ }
+ SetActiveSpatialLayers();
+ }
+
+ if (is_svc_ && performance_flags_.use_per_layer_speed) {
+ // Update speed settings that might depend on temporal index.
+ bool speed_updated = false;
+ for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
+ const int target_speed =
+ layer_id.temporal_layer_id_per_spatial[sl_idx] == 0
+ ? performance_flags_by_spatial_index_[sl_idx].base_layer_speed
+ : performance_flags_by_spatial_index_[sl_idx].high_layer_speed;
+ if (svc_params_.speed_per_layer[sl_idx] != target_speed) {
+ svc_params_.speed_per_layer[sl_idx] = target_speed;
+ speed_updated = true;
+ }
+ }
+ if (speed_updated) {
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
+ }
+ }
+
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
+
+ if (num_spatial_layers_ > 1) {
+    // Update frame dropping settings as they may change on a per-frame basis.
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
+ &svc_drop_frame_);
+ }
+
+ if (config_changed_) {
+ if (libvpx_->codec_enc_config_set(encoder_, config_)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (!performance_flags_.use_per_layer_speed) {
+ // Not setting individual speeds per layer, find the highest active
+ // resolution instead and base the speed on that.
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ if (config_->ss_target_bitrate[i] > 0) {
+ int width = (svc_params_.scaling_factor_num[i] * config_->g_w) /
+ svc_params_.scaling_factor_den[i];
+ int height = (svc_params_.scaling_factor_num[i] * config_->g_h) /
+ svc_params_.scaling_factor_den[i];
+ int speed =
+ std::prev(performance_flags_.settings_by_resolution.lower_bound(
+ width * height))
+ ->second.base_layer_speed;
+ libvpx_->codec_control(encoder_, VP8E_SET_CPUUSED, speed);
+ break;
+ }
+ }
+ }
+ config_changed_ = false;
+ }
+
+ if (input_image.width() != codec_.width ||
+ input_image.height() != codec_.height) {
+ int ret = UpdateCodecFrameSize(input_image);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ RTC_DCHECK_EQ(input_image.width(), raw_->d_w);
+ RTC_DCHECK_EQ(input_image.height(), raw_->d_h);
+
+  // Keep a pointer to the input image for use in the encode-complete callback.
+  // The callback needs some information from `input_image`; saving only the
+  // required fields (such as the timestamp) would work as well.
+ input_image_ = &input_image;
+
+ // In case we need to map the buffer, `mapped_buffer` is used to keep it alive
+ // through reference counting until after encoding has finished.
+ rtc::scoped_refptr<const VideoFrameBuffer> mapped_buffer;
+ const I010BufferInterface* i010_buffer;
+ rtc::scoped_refptr<const I010BufferInterface> i010_copy;
+ switch (profile_) {
+ case VP9Profile::kProfile0: {
+ mapped_buffer =
+ PrepareBufferForProfile0(input_image.video_frame_buffer());
+ if (!mapped_buffer) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ break;
+ }
+ case VP9Profile::kProfile1: {
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ case VP9Profile::kProfile2: {
+ // We can inject kI010 frames directly for encode. All other formats
+ // should be converted to it.
+ switch (input_image.video_frame_buffer()->type()) {
+ case VideoFrameBuffer::Type::kI010: {
+ i010_buffer = input_image.video_frame_buffer()->GetI010();
+ break;
+ }
+ default: {
+ auto i420_buffer = input_image.video_frame_buffer()->ToI420();
+ if (!i420_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(
+ input_image.video_frame_buffer()->type())
+ << " image to I420. Can't encode frame.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ i010_copy = I010Buffer::Copy(*i420_buffer);
+ i010_buffer = i010_copy.get();
+ }
+ }
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(
+ reinterpret_cast<const uint8_t*>(i010_buffer->DataY()));
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(
+ reinterpret_cast<const uint8_t*>(i010_buffer->DataU()));
+ raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(
+ reinterpret_cast<const uint8_t*>(i010_buffer->DataV()));
+ raw_->stride[VPX_PLANE_Y] = i010_buffer->StrideY() * 2;
+ raw_->stride[VPX_PLANE_U] = i010_buffer->StrideU() * 2;
+ raw_->stride[VPX_PLANE_V] = i010_buffer->StrideV() * 2;
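+      // The strides above are doubled since I010 samples are 16 bits wide
+      // while libvpx strides are measured in bytes.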
+ break;
+ }
+ case VP9Profile::kProfile3: {
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ }
+
+ vpx_enc_frame_flags_t flags = 0;
+ if (force_key_frame_) {
+ flags = VPX_EFLAG_FORCE_KF;
+ }
+
+ if (svc_controller_) {
+ vpx_svc_ref_frame_config_t ref_config = Vp9References(layer_frames_);
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+ &ref_config);
+ } else if (external_ref_control_) {
+ vpx_svc_ref_frame_config_t ref_config =
+ SetReferences(force_key_frame_, layer_id.spatial_layer_id);
+
+ if (VideoCodecMode::kScreensharing == codec_.mode) {
+ for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
+ ref_config.duration[sl_idx] = static_cast<int64_t>(
+ 90000 / (std::min(static_cast<float>(codec_.maxFramerate),
+ framerate_controller_[sl_idx].GetTargetRate())));
+ }
+ }
+
+ libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+ &ref_config);
+ }
+
+ first_frame_in_picture_ = true;
+
+  // TODO(ssilkin): Frame duration should be specified per spatial layer
+  // since their frame rates can differ. For now, calculate the frame duration
+  // based on the target frame rate of the highest spatial layer, whose frame
+  // rate is supposed to be equal to or higher than that of the lower spatial
+  // layers. Also, the timestamp should represent the actual time passed since
+  // the previous frame (not the 'expected' time). Then the rate controller
+  // can drain the buffer more accurately.
+ RTC_DCHECK_GE(framerate_controller_.size(), num_active_spatial_layers_);
+ float target_framerate_fps =
+ (codec_.mode == VideoCodecMode::kScreensharing)
+ ? std::min(static_cast<float>(codec_.maxFramerate),
+ framerate_controller_[num_active_spatial_layers_ - 1]
+ .GetTargetRate())
+ : codec_.maxFramerate;
+ uint32_t duration = static_cast<uint32_t>(90000 / target_framerate_fps);
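+  // E.g. at a 30 fps target this yields 90000 / 30 = 3000 RTP ticks per frame.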
+ const vpx_codec_err_t rv = libvpx_->codec_encode(
+ encoder_, raw_, timestamp_, duration, flags, VPX_DL_REALTIME);
+ if (rv != VPX_CODEC_OK) {
+ RTC_LOG(LS_ERROR) << "Encoding error: " << libvpx_->codec_err_to_string(rv)
+ << "\n"
+ "Details: "
+ << libvpx_->codec_error(encoder_) << "\n"
+ << libvpx_->codec_error_detail(encoder_);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ timestamp_ += duration;
+
+ if (layer_buffering_) {
+ const bool end_of_picture = true;
+ DeliverBufferedFrame(end_of_picture);
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int LibvpxVp9Encoder::UpdateCodecFrameSize(
+ const VideoFrame& input_image) {
+ RTC_LOG(LS_INFO) << "Reconfiging VP from " <<
+ codec_.width << "x" << codec_.height << " to " <<
+ input_image.width() << "x" << input_image.height();
+ // Preserve latest bitrate/framerate setting
+ // TODO: Mozilla - see below, we need to save more state here.
+ //uint32_t old_bitrate_kbit = config_->rc_target_bitrate;
+ //uint32_t old_framerate = codec_.maxFramerate;
+
+ codec_.width = input_image.width();
+ codec_.height = input_image.height();
+
+ vpx_img_free(raw_);
+ raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
+ 1, NULL);
+ // Update encoder context for new frame size.
+ config_->g_w = codec_.width;
+ config_->g_h = codec_.height;
+
+ // Determine number of threads based on the image size and #cores.
+ config_->g_threads = NumberOfThreads(codec_.width, codec_.height,
+ num_cores_);
+
+ // NOTE: We would like to do this the same way vp8 does it
+ // (with vpx_codec_enc_config_set()), but that causes asserts
+ // in AQ 3 (cyclic); and in AQ 0 it works, but on a resize to smaller
+ // than 1/2 x 1/2 original it asserts in convolve(). Given these
+ // bugs in trying to do it the "right" way, we basically re-do
+ // the initialization.
+ vpx_codec_destroy(encoder_); // clean up old state
+ int result = InitAndSetControlSettings(&codec_);
+ if (result == WEBRTC_VIDEO_CODEC_OK) {
+ // TODO: Mozilla rates have become much more complicated, we need to store
+ // more state or find another way of doing this.
+ //return SetRates(old_bitrate_kbit, old_framerate);
+ RTC_CHECK(false);
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ return result;
+}
+
+bool LibvpxVp9Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ absl::optional<int>* spatial_idx,
+ absl::optional<int>* temporal_idx,
+ const vpx_codec_cx_pkt& pkt) {
+ RTC_CHECK(codec_specific != nullptr);
+ codec_specific->codecType = kVideoCodecVP9;
+ CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
+
+ vp9_info->first_frame_in_picture = first_frame_in_picture_;
+ vp9_info->flexible_mode = is_flexible_mode_;
+
+ if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+ pics_since_key_ = 0;
+ } else if (first_frame_in_picture_) {
+ ++pics_since_key_;
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ // Can't have keyframe with non-zero temporal layer.
+ RTC_DCHECK(pics_since_key_ != 0 || layer_id.temporal_layer_id == 0);
+
+ RTC_CHECK_GT(num_temporal_layers_, 0);
+ RTC_CHECK_GT(num_active_spatial_layers_, 0);
+ if (num_temporal_layers_ == 1) {
+ RTC_CHECK_EQ(layer_id.temporal_layer_id, 0);
+ vp9_info->temporal_idx = kNoTemporalIdx;
+ *temporal_idx = absl::nullopt;
+ } else {
+ vp9_info->temporal_idx = layer_id.temporal_layer_id;
+ *temporal_idx = layer_id.temporal_layer_id;
+ }
+ if (num_active_spatial_layers_ == 1) {
+ RTC_CHECK_EQ(layer_id.spatial_layer_id, 0);
+ *spatial_idx = absl::nullopt;
+ } else {
+ *spatial_idx = layer_id.spatial_layer_id;
+ }
+
+ const bool is_key_pic = (pics_since_key_ == 0);
+ const bool is_inter_layer_pred_allowed =
+ (inter_layer_pred_ == InterLayerPredMode::kOn ||
+ (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic));
+
+  // Always set inter_layer_predicted to true on a high-layer frame if
+  // inter-layer prediction (ILP) is allowed, even if the encoder didn't
+  // actually use it. Setting inter_layer_predicted to false would allow the
+  // receiver to decode a high-layer frame without decoding the low-layer
+  // frame. If that happened (e.g. because the low-layer frame was lost), the
+  // receiver wouldn't be able to decode the next high-layer frame that uses
+  // ILP.
+ vp9_info->inter_layer_predicted =
+ first_frame_in_picture_ ? false : is_inter_layer_pred_allowed;
+
+  // Mark all low spatial layer frames as references (not just frames of
+  // active low spatial layers) if inter-layer prediction is enabled, since
+  // these frames are indirect references of the high spatial layer, which can
+  // later be enabled without a key frame.
+ vp9_info->non_ref_for_inter_layer_pred =
+ !is_inter_layer_pred_allowed ||
+ layer_id.spatial_layer_id + 1 == num_spatial_layers_;
+
+ // Always populate this, so that the packetizer can properly set the marker
+ // bit.
+ vp9_info->num_spatial_layers = num_active_spatial_layers_;
+ vp9_info->first_active_layer = first_active_layer_;
+
+ vp9_info->num_ref_pics = 0;
+ FillReferenceIndices(pkt, pics_since_key_, vp9_info->inter_layer_predicted,
+ vp9_info);
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof_idx = kNoGofIdx;
+ if (!svc_controller_) {
+ if (num_temporal_layers_ == 1) {
+ vp9_info->temporal_up_switch = true;
+ } else {
+        // In flexible mode with > 1 temporal layer but no SVC controller we
+        // can't technically determine if a frame is an upswitch point; use
+        // gof-based data as a proxy for now.
+        // TODO(sprang): Remove once SVC controller is the only choice.
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch =
+ gof_.temporal_up_switch[vp9_info->gof_idx];
+ }
+ }
+ } else {
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
+ RTC_DCHECK(vp9_info->num_ref_pics == gof_.num_ref_pics[vp9_info->gof_idx] ||
+ vp9_info->num_ref_pics == 0);
+ }
+
+ vp9_info->inter_pic_predicted = (!is_key_pic && vp9_info->num_ref_pics > 0);
+
+ // Write SS on key frame of independently coded spatial layers and on base
+ // temporal/spatial layer frame if number of layers changed without issuing
+ // of key picture (inter-layer prediction is enabled).
+ const bool is_key_frame = is_key_pic && !vp9_info->inter_layer_predicted;
+ if (is_key_frame || (ss_info_needed_ && layer_id.temporal_layer_id == 0 &&
+ layer_id.spatial_layer_id == first_active_layer_)) {
+ vp9_info->ss_data_available = true;
+ vp9_info->spatial_layer_resolution_present = true;
+ // Signal disabled layers.
+ for (size_t i = 0; i < first_active_layer_; ++i) {
+ vp9_info->width[i] = 0;
+ vp9_info->height[i] = 0;
+ }
+ for (size_t i = first_active_layer_; i < num_active_spatial_layers_; ++i) {
+ vp9_info->width[i] = codec_.width * svc_params_.scaling_factor_num[i] /
+ svc_params_.scaling_factor_den[i];
+ vp9_info->height[i] = codec_.height * svc_params_.scaling_factor_num[i] /
+ svc_params_.scaling_factor_den[i];
+ }
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof.num_frames_in_gof = 0;
+ } else {
+ vp9_info->gof.CopyGofInfoVP9(gof_);
+ }
+
+ ss_info_needed_ = false;
+ } else {
+ vp9_info->ss_data_available = false;
+ }
+
+ first_frame_in_picture_ = false;
+
+ // Populate codec-agnostic section in the codec specific structure.
+ if (svc_controller_) {
+ auto it = absl::c_find_if(
+ layer_frames_,
+ [&](const ScalableVideoController::LayerFrameConfig& config) {
+ return config.SpatialId() == layer_id.spatial_layer_id;
+ });
+ if (it == layer_frames_.end()) {
+ RTC_LOG(LS_ERROR) << "Encoder produced a frame for layer S"
+ << layer_id.spatial_layer_id << "T"
+ << layer_id.temporal_layer_id
+ << " that wasn't requested.";
+ return false;
+ }
+ codec_specific->generic_frame_info = svc_controller_->OnEncodeDone(*it);
+ if (is_key_frame) {
+ codec_specific->template_structure =
+ svc_controller_->DependencyStructure();
+ auto& resolutions = codec_specific->template_structure->resolutions;
+ resolutions.resize(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ resolutions[sid] = RenderResolution(
+ /*width=*/codec_.width * svc_params_.scaling_factor_num[sid] /
+ svc_params_.scaling_factor_den[sid],
+ /*height=*/codec_.height * svc_params_.scaling_factor_num[sid] /
+ svc_params_.scaling_factor_den[sid]);
+ }
+ }
+ if (is_flexible_mode_) {
+ // Populate data for legacy temporal-upswitch state.
+ // We can switch up to a higher temporal layer only if all temporal layers
+ // higher than this (within the current spatial layer) are switch points.
+ vp9_info->temporal_up_switch = true;
+ for (int i = layer_id.temporal_layer_id + 1; i < num_temporal_layers_;
+ ++i) {
+ // Assumes decode targets are always ordered first by spatial then by
+ // temporal id.
+ size_t dti_index =
+ (layer_id.spatial_layer_id * num_temporal_layers_) + i;
+ vp9_info->temporal_up_switch &=
+ (codec_specific->generic_frame_info
+ ->decode_target_indications[dti_index] ==
+ DecodeTargetIndication::kSwitch);
+ }
+ }
+ }
+ codec_specific->scalability_mode = scalability_mode_;
+ return true;
+}
+
+void LibvpxVp9Encoder::FillReferenceIndices(const vpx_codec_cx_pkt& pkt,
+ const size_t pic_num,
+ const bool inter_layer_predicted,
+ CodecSpecificInfoVP9* vp9_info) {
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ const bool is_key_frame =
+ (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
+
+ std::vector<RefFrameBuffer> ref_buf_list;
+
+ if (is_svc_) {
+ vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG,
+ &enc_layer_conf);
+ char ref_buf_flags[] = "00000000";
+ // There should be one character per buffer + 1 termination '\0'.
+ static_assert(sizeof(ref_buf_flags) == kNumVp9Buffers + 1);
+
+ if (enc_layer_conf.reference_last[layer_id.spatial_layer_id]) {
+ const size_t fb_idx =
+ enc_layer_conf.lst_fb_idx[layer_id.spatial_layer_id];
+ RTC_DCHECK_LT(fb_idx, ref_buf_.size());
+ if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
+ ref_buf_[fb_idx]) == ref_buf_list.end()) {
+ ref_buf_list.push_back(ref_buf_[fb_idx]);
+ ref_buf_flags[fb_idx] = '1';
+ }
+ }
+
+ if (enc_layer_conf.reference_alt_ref[layer_id.spatial_layer_id]) {
+ const size_t fb_idx =
+ enc_layer_conf.alt_fb_idx[layer_id.spatial_layer_id];
+ RTC_DCHECK_LT(fb_idx, ref_buf_.size());
+ if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
+ ref_buf_[fb_idx]) == ref_buf_list.end()) {
+ ref_buf_list.push_back(ref_buf_[fb_idx]);
+ ref_buf_flags[fb_idx] = '1';
+ }
+ }
+
+ if (enc_layer_conf.reference_golden[layer_id.spatial_layer_id]) {
+ const size_t fb_idx =
+ enc_layer_conf.gld_fb_idx[layer_id.spatial_layer_id];
+ RTC_DCHECK_LT(fb_idx, ref_buf_.size());
+ if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
+ ref_buf_[fb_idx]) == ref_buf_list.end()) {
+ ref_buf_list.push_back(ref_buf_[fb_idx]);
+ ref_buf_flags[fb_idx] = '1';
+ }
+ }
+
+    RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
+                        << layer_id.spatial_layer_id << " tl "
+                        << layer_id.temporal_layer_id << " referred buffers "
+                        << ref_buf_flags;
+
+ } else if (!is_key_frame) {
+ RTC_DCHECK_EQ(num_spatial_layers_, 1);
+ RTC_DCHECK_EQ(num_temporal_layers_, 1);
+    // In non-SVC mode the encoder doesn't provide a reference list. Assume
+    // each frame refers to the previous one, which is stored in buffer 0.
+ ref_buf_list.push_back(ref_buf_[0]);
+ }
+
+ std::vector<size_t> ref_pid_list;
+
+ vp9_info->num_ref_pics = 0;
+ for (const RefFrameBuffer& ref_buf : ref_buf_list) {
+ RTC_DCHECK_LE(ref_buf.pic_num, pic_num);
+ if (ref_buf.pic_num < pic_num) {
+ if (inter_layer_pred_ != InterLayerPredMode::kOn) {
+ // RTP spec limits temporal prediction to the same spatial layer.
+ // It is safe to ignore this requirement if inter-layer prediction is
+ // enabled for all frames when all base frames are relayed to receiver.
+ RTC_DCHECK_EQ(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
+ } else {
+ RTC_DCHECK_LE(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
+ }
+ RTC_DCHECK_LE(ref_buf.temporal_layer_id, layer_id.temporal_layer_id);
+
+      // The encoder may reference several spatial layers of the same previous
+      // frame if some spatial layers are skipped on the current frame. We
+      // shouldn't emit duplicate references, as that may break some old
+      // clients and isn't RTP-compatible.
+ if (std::find(ref_pid_list.begin(), ref_pid_list.end(),
+ ref_buf.pic_num) != ref_pid_list.end()) {
+ continue;
+ }
+ ref_pid_list.push_back(ref_buf.pic_num);
+
+ const size_t p_diff = pic_num - ref_buf.pic_num;
+ RTC_DCHECK_LE(p_diff, 127UL);
+
+ vp9_info->p_diff[vp9_info->num_ref_pics] = static_cast<uint8_t>(p_diff);
+ ++vp9_info->num_ref_pics;
+ } else {
+ RTC_DCHECK(inter_layer_predicted);
+      // The RTP spec only allows using the previous spatial layer for
+      // inter-layer prediction.
+ RTC_DCHECK_EQ(ref_buf.spatial_layer_id + 1, layer_id.spatial_layer_id);
+ }
+ }
+}
+
+void LibvpxVp9Encoder::UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt,
+ const size_t pic_num) {
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ RefFrameBuffer frame_buf = {.pic_num = pic_num,
+ .spatial_layer_id = layer_id.spatial_layer_id,
+ .temporal_layer_id = layer_id.temporal_layer_id};
+
+ if (is_svc_) {
+ vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG,
+ &enc_layer_conf);
+ const int update_buffer_slot =
+ enc_layer_conf.update_buffer_slot[layer_id.spatial_layer_id];
+
+ for (size_t i = 0; i < ref_buf_.size(); ++i) {
+ if (update_buffer_slot & (1 << i)) {
+ ref_buf_[i] = frame_buf;
+ }
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
+ << layer_id.spatial_layer_id << " tl "
+ << layer_id.temporal_layer_id << " updated buffers "
+ << (update_buffer_slot & (1 << 0) ? 1 : 0)
+ << (update_buffer_slot & (1 << 1) ? 1 : 0)
+ << (update_buffer_slot & (1 << 2) ? 1 : 0)
+ << (update_buffer_slot & (1 << 3) ? 1 : 0)
+ << (update_buffer_slot & (1 << 4) ? 1 : 0)
+ << (update_buffer_slot & (1 << 5) ? 1 : 0)
+ << (update_buffer_slot & (1 << 6) ? 1 : 0)
+ << (update_buffer_slot & (1 << 7) ? 1 : 0);
+ } else {
+ RTC_DCHECK_EQ(num_spatial_layers_, 1);
+ RTC_DCHECK_EQ(num_temporal_layers_, 1);
+    // In non-SVC mode the encoder doesn't provide a reference list. Assume
+    // each frame is a reference and is stored in buffer 0.
+ ref_buf_[0] = frame_buf;
+ }
+}
+
+vpx_svc_ref_frame_config_t LibvpxVp9Encoder::SetReferences(
+ bool is_key_pic,
+ int first_active_spatial_layer_id) {
+ // kRefBufIdx, kUpdBufIdx need to be updated to support longer GOFs.
+ RTC_DCHECK_LE(gof_.num_frames_in_gof, 4);
+
+ vpx_svc_ref_frame_config_t ref_config;
+ memset(&ref_config, 0, sizeof(ref_config));
+
+ const size_t num_temporal_refs = std::max(1, num_temporal_layers_ - 1);
+ const bool is_inter_layer_pred_allowed =
+ inter_layer_pred_ == InterLayerPredMode::kOn ||
+ (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic);
+ absl::optional<int> last_updated_buf_idx;
+
+  // Put the temporal reference in LAST and the spatial reference in GOLDEN.
+  // Update the frame buffer (i.e. store the encoded frame) if the current
+  // frame is a temporal reference (i.e. it belongs to a low temporal layer)
+  // or a spatial reference. In the latter case, always store the spatial
+  // reference in the last reference frame buffer.
+  // For the case of 3 temporal and 3 spatial layers we need 6 frame buffers
+  // for temporal references plus 1 buffer for the spatial reference, 7
+  // buffers in total.
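+  // For example (illustrative, assuming kRefBufIdx values in {0, 1}): with
+  // 3 temporal layers num_temporal_refs is 2, so temporal references use
+  // buffers sl_idx * 2 + kRefBufIdx[gof_idx] (SL0 -> 0..1, SL1 -> 2..3,
+  // SL2 -> 4..5), while the spatial reference lives in buffer
+  // kNumVp9Buffers - 1 = 7.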
+
+ for (int sl_idx = first_active_spatial_layer_id;
+ sl_idx < num_active_spatial_layers_; ++sl_idx) {
+ const size_t curr_pic_num = is_key_pic ? 0 : pics_since_key_ + 1;
+ const size_t gof_idx = curr_pic_num % gof_.num_frames_in_gof;
+
+ if (!is_key_pic) {
+ // Set up temporal reference.
+ const int buf_idx = sl_idx * num_temporal_refs + kRefBufIdx[gof_idx];
+
+ // Last reference frame buffer is reserved for spatial reference. It is
+ // not supposed to be used for temporal prediction.
+ RTC_DCHECK_LT(buf_idx, kNumVp9Buffers - 1);
+
+ const int pid_diff = curr_pic_num - ref_buf_[buf_idx].pic_num;
+      // The buffer may hold a frame of a different spatial layer due to a
+      // key frame.
+ const bool same_spatial_layer =
+ ref_buf_[buf_idx].spatial_layer_id == sl_idx;
+ bool correct_pid = false;
+ if (is_flexible_mode_) {
+ correct_pid = pid_diff > 0 && pid_diff < kMaxAllowedPidDiff;
+ } else {
+        // The code below assumes a single temporal reference.
+ RTC_DCHECK_EQ(gof_.num_ref_pics[gof_idx], 1);
+ correct_pid = pid_diff == gof_.pid_diff[gof_idx][0];
+ }
+
+ if (same_spatial_layer && correct_pid) {
+ ref_config.lst_fb_idx[sl_idx] = buf_idx;
+ ref_config.reference_last[sl_idx] = 1;
+ } else {
+        // This reference doesn't match the one specified by the GOF. This can
+        // only happen if a spatial layer is enabled dynamically without a key
+        // frame. Spatial prediction is supposed to be enabled in this case.
+ RTC_DCHECK(is_inter_layer_pred_allowed &&
+ sl_idx > first_active_spatial_layer_id);
+ }
+ }
+
+ if (is_inter_layer_pred_allowed && sl_idx > first_active_spatial_layer_id) {
+ // Set up spatial reference.
+ RTC_DCHECK(last_updated_buf_idx);
+ ref_config.gld_fb_idx[sl_idx] = *last_updated_buf_idx;
+ ref_config.reference_golden[sl_idx] = 1;
+ } else {
+ RTC_DCHECK(ref_config.reference_last[sl_idx] != 0 ||
+ sl_idx == first_active_spatial_layer_id ||
+ inter_layer_pred_ == InterLayerPredMode::kOff);
+ }
+
+ last_updated_buf_idx.reset();
+
+ if (gof_.temporal_idx[gof_idx] < num_temporal_layers_ - 1 ||
+ num_temporal_layers_ == 1) {
+ last_updated_buf_idx = sl_idx * num_temporal_refs + kUpdBufIdx[gof_idx];
+
+ // Ensure last frame buffer is not used for temporal prediction (it is
+ // reserved for spatial reference).
+ RTC_DCHECK_LT(*last_updated_buf_idx, kNumVp9Buffers - 1);
+ } else if (is_inter_layer_pred_allowed) {
+ last_updated_buf_idx = kNumVp9Buffers - 1;
+ }
+
+ if (last_updated_buf_idx) {
+ ref_config.update_buffer_slot[sl_idx] = 1 << *last_updated_buf_idx;
+ }
+ }
+
+ return ref_config;
+}
+
+void LibvpxVp9Encoder::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
+ RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT);
+
+ if (pkt->data.frame.sz == 0) {
+ // Ignore dropped frame.
+ return;
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ if (layer_buffering_) {
+ // Deliver buffered low spatial layer frame.
+ const bool end_of_picture = false;
+ DeliverBufferedFrame(end_of_picture);
+ }
+
+ encoded_image_.SetEncodedData(EncodedImageBuffer::Create(
+ static_cast<const uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz));
+
+ codec_specific_ = {};
+ absl::optional<int> spatial_index;
+ absl::optional<int> temporal_index;
+ if (!PopulateCodecSpecific(&codec_specific_, &spatial_index, &temporal_index,
+ *pkt)) {
+ // Drop the frame.
+ encoded_image_.set_size(0);
+ return;
+ }
+ encoded_image_.SetSpatialIndex(spatial_index);
+ encoded_image_.SetTemporalIndex(temporal_index);
+
+ const bool is_key_frame =
+ ((pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false) &&
+ !codec_specific_.codecSpecific.VP9.inter_layer_predicted;
+
+ // Ensure encoder issued key frame on request.
+ RTC_DCHECK(is_key_frame || !force_key_frame_);
+
+ // Check if encoded frame is a key frame.
+ encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
+ if (is_key_frame) {
+ encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
+ force_key_frame_ = false;
+ }
+
+ UpdateReferenceBuffers(*pkt, pics_since_key_);
+
+ TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
+ encoded_image_.SetTimestamp(input_image_->timestamp());
+ encoded_image_.SetColorSpace(input_image_->color_space());
+ encoded_image_._encodedHeight =
+ pkt->data.frame.height[layer_id.spatial_layer_id];
+ encoded_image_._encodedWidth =
+ pkt->data.frame.width[layer_id.spatial_layer_id];
+ int qp = -1;
+ libvpx_->codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
+ encoded_image_.qp_ = qp;
+
+ if (!layer_buffering_) {
+ const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
+ num_active_spatial_layers_;
+ DeliverBufferedFrame(end_of_picture);
+ }
+}
+
+void LibvpxVp9Encoder::DeliverBufferedFrame(bool end_of_picture) {
+ if (encoded_image_.size() > 0) {
+ if (num_spatial_layers_ > 1) {
+      // Restore frame dropping settings, as dropping may be temporarily
+      // forbidden due to dynamically enabled layers.
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
+ }
+ }
+
+ codec_specific_.end_of_picture = end_of_picture;
+
+ encoded_complete_callback_->OnEncodedImage(encoded_image_,
+ &codec_specific_);
+
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
+ const uint32_t frame_timestamp_ms =
+ 1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
+ framerate_controller_[spatial_idx].AddFrame(frame_timestamp_ms);
+
+ const size_t steady_state_size = SteadyStateSize(
+ spatial_idx, codec_specific_.codecSpecific.VP9.temporal_idx);
+
+      // Only frames on spatial layers that may be limited in a steady state
+      // are considered for steady-state detection.
+ if (framerate_controller_[spatial_idx].GetTargetRate() >
+ variable_framerate_experiment_.framerate_limit + 1e-9) {
+ if (encoded_image_.qp_ <=
+ variable_framerate_experiment_.steady_state_qp &&
+ encoded_image_.size() <= steady_state_size) {
+ ++num_steady_state_frames_;
+ } else {
+ num_steady_state_frames_ = 0;
+ }
+ }
+ }
+ encoded_image_.set_size(0);
+ }
+}
+
+int LibvpxVp9Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ encoded_complete_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+VideoEncoder::EncoderInfo LibvpxVp9Encoder::GetEncoderInfo() const {
+ EncoderInfo info;
+ info.supports_native_handle = false;
+ info.implementation_name = "libvpx";
+ if (quality_scaler_experiment_.enabled && inited_ &&
+ codec_.VP9().automaticResizeOn) {
+ info.scaling_settings = VideoEncoder::ScalingSettings(
+ quality_scaler_experiment_.low_qp, quality_scaler_experiment_.high_qp);
+ } else {
+ info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
+ }
+ info.has_trusted_rate_controller = trusted_rate_controller_;
+ info.is_hardware_accelerated = false;
+ if (inited_) {
+ // Find the max configured fps of any active spatial layer.
+ float max_fps = 0.0;
+ for (size_t si = 0; si < num_spatial_layers_; ++si) {
+ if (codec_.spatialLayers[si].active &&
+ codec_.spatialLayers[si].maxFramerate > max_fps) {
+ max_fps = codec_.spatialLayers[si].maxFramerate;
+ }
+ }
+
+ for (size_t si = 0; si < num_spatial_layers_; ++si) {
+ info.fps_allocation[si].clear();
+ if (!codec_.spatialLayers[si].active) {
+ continue;
+ }
+
+ // This spatial layer may already use a fraction of the total frame rate.
+ const float sl_fps_fraction =
+ codec_.spatialLayers[si].maxFramerate / max_fps;
+ for (size_t ti = 0; ti < num_temporal_layers_; ++ti) {
+ const uint32_t decimator =
+ num_temporal_layers_ <= 1 ? 1 : config_->ts_rate_decimator[ti];
+ RTC_DCHECK_GT(decimator, 0);
+ info.fps_allocation[si].push_back(
+ rtc::saturated_cast<uint8_t>(EncoderInfo::kMaxFramerateFraction *
+ (sl_fps_fraction / decimator)));
+ }
+ }
+ if (profile_ == VP9Profile::kProfile0) {
+ info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+ }
+ }
+ if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
+ info.resolution_bitrate_limits =
+ encoder_info_override_.resolution_bitrate_limits();
+ }
+ return info;
+}
+
+size_t LibvpxVp9Encoder::SteadyStateSize(int sid, int tid) {
+ const size_t bitrate_bps = current_bitrate_allocation_.GetBitrate(
+ sid, tid == kNoTemporalIdx ? 0 : tid);
+ const float fps = (codec_.mode == VideoCodecMode::kScreensharing)
+ ? std::min(static_cast<float>(codec_.maxFramerate),
+ framerate_controller_[sid].GetTargetRate())
+ : codec_.maxFramerate;
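+  // For example (illustrative): bitrate_bps = 200000, fps = 5 and a 30%
+  // undershoot give 200000 / (8 * 5) * 70 / 100 + 0.5, i.e. roughly 3500
+  // bytes.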
+ return static_cast<size_t>(
+ bitrate_bps / (8 * fps) *
+ (100 -
+ variable_framerate_experiment_.steady_state_undershoot_percentage) /
+ 100 +
+ 0.5);
+}
+
+// static
+LibvpxVp9Encoder::VariableFramerateExperiment
+LibvpxVp9Encoder::ParseVariableFramerateConfig(const FieldTrialsView& trials) {
+ FieldTrialFlag enabled = FieldTrialFlag("Enabled");
+ FieldTrialParameter<double> framerate_limit("min_fps", 5.0);
+ FieldTrialParameter<int> qp("min_qp", 32);
+ FieldTrialParameter<int> undershoot_percentage("undershoot", 30);
+ FieldTrialParameter<int> frames_before_steady_state(
+ "frames_before_steady_state", 5);
+ ParseFieldTrial({&enabled, &framerate_limit, &qp, &undershoot_percentage,
+ &frames_before_steady_state},
+ trials.Lookup("WebRTC-VP9VariableFramerateScreenshare"));
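+  // A hypothetical trial value of the expected shape (illustrative, not a
+  // recommendation):
+  //   "Enabled,min_fps:7.5,min_qp:28,undershoot:25,frames_before_steady_state:10"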
+ VariableFramerateExperiment config;
+ config.enabled = enabled.Get();
+ config.framerate_limit = framerate_limit.Get();
+ config.steady_state_qp = qp.Get();
+ config.steady_state_undershoot_percentage = undershoot_percentage.Get();
+ config.frames_before_steady_state = frames_before_steady_state.Get();
+
+ return config;
+}
+
+// static
+LibvpxVp9Encoder::QualityScalerExperiment
+LibvpxVp9Encoder::ParseQualityScalerConfig(const FieldTrialsView& trials) {
+ FieldTrialFlag disabled = FieldTrialFlag("Disabled");
+ FieldTrialParameter<int> low_qp("low_qp", kLowVp9QpThreshold);
+ FieldTrialParameter<int> high_qp("hihg_qp", kHighVp9QpThreshold);
+ ParseFieldTrial({&disabled, &low_qp, &high_qp},
+ trials.Lookup("WebRTC-VP9QualityScaler"));
+ QualityScalerExperiment config;
+ config.enabled = !disabled.Get();
+ RTC_LOG(LS_INFO) << "Webrtc quality scaler for vp9 is "
+ << (config.enabled ? "enabled." : "disabled");
+ config.low_qp = low_qp.Get();
+ config.high_qp = high_qp.Get();
+
+ return config;
+}
+
+void LibvpxVp9Encoder::UpdatePerformanceFlags() {
+ flat_map<int, PerformanceFlags::ParameterSet> params_by_resolution;
+ if (codec_.GetVideoEncoderComplexity() ==
+ VideoCodecComplexity::kComplexityLow) {
+ // For low tier devices, always use speed 9. Only disable upper
+ // layer deblocking below QCIF.
+ params_by_resolution[0] = {.base_layer_speed = 9,
+ .high_layer_speed = 9,
+ .deblock_mode = 1,
+ .allow_denoising = true};
+ params_by_resolution[352 * 288] = {.base_layer_speed = 9,
+ .high_layer_speed = 9,
+ .deblock_mode = 0,
+ .allow_denoising = true};
+ } else {
+ params_by_resolution = performance_flags_.settings_by_resolution;
+ }
+
+ const auto find_speed = [&](int min_pixel_count) {
+ RTC_DCHECK(!params_by_resolution.empty());
+ auto it = params_by_resolution.upper_bound(min_pixel_count);
+ return std::prev(it)->second;
+ };
+ performance_flags_by_spatial_index_.clear();
+
+ if (is_svc_) {
+ for (int si = 0; si < num_spatial_layers_; ++si) {
+ performance_flags_by_spatial_index_.push_back(find_speed(
+ codec_.spatialLayers[si].width * codec_.spatialLayers[si].height));
+ }
+ } else {
+ performance_flags_by_spatial_index_.push_back(
+ find_speed(codec_.width * codec_.height));
+ }
+}
+
+// static
+LibvpxVp9Encoder::PerformanceFlags
+LibvpxVp9Encoder::ParsePerformanceFlagsFromTrials(
+ const FieldTrialsView& trials) {
+ struct Params : public PerformanceFlags::ParameterSet {
+ int min_pixel_count = 0;
+ };
+
+ FieldTrialStructList<Params> trials_list(
+ {FieldTrialStructMember("min_pixel_count",
+ [](Params* p) { return &p->min_pixel_count; }),
+ FieldTrialStructMember("high_layer_speed",
+ [](Params* p) { return &p->high_layer_speed; }),
+ FieldTrialStructMember("base_layer_speed",
+ [](Params* p) { return &p->base_layer_speed; }),
+ FieldTrialStructMember("deblock_mode",
+ [](Params* p) { return &p->deblock_mode; }),
+ FieldTrialStructMember("denoiser",
+ [](Params* p) { return &p->allow_denoising; })},
+ {});
+
+ FieldTrialFlag per_layer_speed("use_per_layer_speed");
+
+ ParseFieldTrial({&trials_list, &per_layer_speed},
+ trials.Lookup("WebRTC-VP9-PerformanceFlags"));
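+  // A hypothetical trial value of the expected shape (illustrative only):
+  //   "use_per_layer_speed,min_pixel_count:0|230400,base_layer_speed:5|7,
+  //    high_layer_speed:8|8,deblock_mode:1|0"
+  // which would configure one ParameterSet below 640x360 (230400 pixels) and
+  // another at or above it.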
+
+ PerformanceFlags flags;
+ flags.use_per_layer_speed = per_layer_speed.Get();
+
+ constexpr int kMinSpeed = 1;
+ constexpr int kMaxSpeed = 9;
+ for (auto& f : trials_list.Get()) {
+ if (f.base_layer_speed < kMinSpeed || f.base_layer_speed > kMaxSpeed ||
+ f.high_layer_speed < kMinSpeed || f.high_layer_speed > kMaxSpeed ||
+ f.deblock_mode < 0 || f.deblock_mode > 2) {
+ RTC_LOG(LS_WARNING) << "Ignoring invalid performance flags: "
+ << "min_pixel_count = " << f.min_pixel_count
+ << ", high_layer_speed = " << f.high_layer_speed
+ << ", base_layer_speed = " << f.base_layer_speed
+ << ", deblock_mode = " << f.deblock_mode;
+ continue;
+ }
+ flags.settings_by_resolution[f.min_pixel_count] = f;
+ }
+
+ if (flags.settings_by_resolution.empty()) {
+ return GetDefaultPerformanceFlags();
+ }
+
+ return flags;
+}
+
+// static
+LibvpxVp9Encoder::PerformanceFlags
+LibvpxVp9Encoder::GetDefaultPerformanceFlags() {
+ PerformanceFlags flags;
+ flags.use_per_layer_speed = true;
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID)
+ // Speed 8 on all layers for all resolutions.
+ flags.settings_by_resolution[0] = {.base_layer_speed = 8,
+ .high_layer_speed = 8,
+ .deblock_mode = 0,
+ .allow_denoising = true};
+#else
+
+  // For smaller resolutions, use a lower speed setting for the temporal base
+  // layer (get some coding gain at the cost of increased encoding complexity).
+  // Set encoder speed 5 for TL0, encoder speed 8 for upper temporal layers,
+  // and disable deblocking for the top-most temporal layer.
+ flags.settings_by_resolution[0] = {.base_layer_speed = 5,
+ .high_layer_speed = 8,
+ .deblock_mode = 1,
+ .allow_denoising = true};
+
+ // Use speed 7 for QCIF and above.
+ // Set encoder Speed 7 for TL0, encoder Speed 8 for upper temporal layers, and
+ // enable deblocking for all temporal layers.
+ flags.settings_by_resolution[352 * 288] = {.base_layer_speed = 7,
+ .high_layer_speed = 8,
+ .deblock_mode = 0,
+ .allow_denoising = true};
+
+  // For very high resolutions (1080p and up), turn the speed all the way up,
+  // since encoding is very CPU-intensive there. Also disable denoising to save
+  // CPU; at these resolutions denoising appears less effective, and the video
+  // source is hopefully less noisy to begin with.
+ flags.settings_by_resolution[1920 * 1080] = {.base_layer_speed = 9,
+ .high_layer_speed = 9,
+ .deblock_mode = 0,
+ .allow_denoising = false};
+
+#endif
+ return flags;
+}
+
+void LibvpxVp9Encoder::MaybeRewrapRawWithFormat(const vpx_img_fmt fmt) {
+ if (!raw_) {
+ raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1,
+ nullptr);
+ } else if (raw_->fmt != fmt) {
+ RTC_LOG(LS_INFO) << "Switching VP9 encoder pixel format to "
+ << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420");
+ libvpx_->img_free(raw_);
+ raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1,
+ nullptr);
+ }
+ // else no-op since the image is already in the right format.
+}
+
+rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
+ rtc::scoped_refptr<VideoFrameBuffer> buffer) {
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ supported_formats = {VideoFrameBuffer::Type::kI420,
+ VideoFrameBuffer::Type::kNV12};
+
+ rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ if (buffer->type() != VideoFrameBuffer::Type::kNative) {
+ // `buffer` is already mapped.
+ mapped_buffer = buffer;
+ } else {
+ // Attempt to map to one of the supported formats.
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
+ }
+ if (!mapped_buffer ||
+ (absl::c_find(supported_formats, mapped_buffer->type()) ==
+ supported_formats.end() &&
+ mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
+ // Unknown pixel format or unable to map, convert to I420 and prepare that
+ // buffer instead to ensure Scale() is safe to use.
+ auto converted_buffer = buffer->ToI420();
+ if (!converted_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to convert "
+ << VideoFrameBufferTypeToString(buffer->type())
+ << " image to I420. Can't encode frame.";
+ return {};
+ }
+ RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
+ converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
+
+ // Because `buffer` had to be converted, use `converted_buffer` instead.
+ buffer = mapped_buffer = converted_buffer;
+ }
+
+ // Prepare `raw_` from `mapped_buffer`.
+ switch (mapped_buffer->type()) {
+ case VideoFrameBuffer::Type::kI420:
+ case VideoFrameBuffer::Type::kI420A: {
+ MaybeRewrapRawWithFormat(VPX_IMG_FMT_I420);
+ const I420BufferInterface* i420_buffer = mapped_buffer->GetI420();
+ RTC_DCHECK(i420_buffer);
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(i420_buffer->DataY());
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(i420_buffer->DataU());
+ raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(i420_buffer->DataV());
+ raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
+ raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU();
+ raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV();
+ break;
+ }
+ case VideoFrameBuffer::Type::kNV12: {
+ MaybeRewrapRawWithFormat(VPX_IMG_FMT_NV12);
+ const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12();
+ RTC_DCHECK(nv12_buffer);
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(nv12_buffer->DataY());
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(nv12_buffer->DataUV());
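+      // NV12 stores U and V interleaved in a single plane, so point the V
+      // plane one byte past U and reuse the UV stride for both.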
+ raw_->planes[VPX_PLANE_V] = raw_->planes[VPX_PLANE_U] + 1;
+ raw_->stride[VPX_PLANE_Y] = nv12_buffer->StrideY();
+ raw_->stride[VPX_PLANE_U] = nv12_buffer->StrideUV();
+ raw_->stride[VPX_PLANE_V] = nv12_buffer->StrideUV();
+ break;
+ }
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return mapped_buffer;
+}
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
new file mode 100644
index 0000000000..bb871f8498
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_
+
+#ifdef RTC_ENABLE_VP9
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/field_trials_view.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "common_video/include/video_frame_buffer_pool.h"
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "vpx/vp8cx.h"
+
+namespace webrtc {
+
+class LibvpxVp9Encoder : public VP9Encoder {
+ public:
+ LibvpxVp9Encoder(const cricket::VideoCodec& codec,
+ std::unique_ptr<LibvpxInterface> interface,
+ const FieldTrialsView& trials);
+
+ ~LibvpxVp9Encoder() override;
+
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+
+ int Release() override;
+
+ int InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) override;
+
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+
+ void SetRates(const RateControlParameters& parameters) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ private:
+ // Determine number of encoder threads to use.
+ int NumberOfThreads(int width, int height, int number_of_cores);
+
+ // Call encoder initialize function and set control settings.
+ int InitAndSetControlSettings(const VideoCodec* inst);
+
+ // Update frame size for codec.
+ int UpdateCodecFrameSize(const VideoFrame& input_image);
+
+ bool PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
+ absl::optional<int>* spatial_idx,
+ absl::optional<int>* temporal_idx,
+ const vpx_codec_cx_pkt& pkt);
+ void FillReferenceIndices(const vpx_codec_cx_pkt& pkt,
+ size_t pic_num,
+ bool inter_layer_predicted,
+ CodecSpecificInfoVP9* vp9_info);
+ void UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt, size_t pic_num);
+ vpx_svc_ref_frame_config_t SetReferences(bool is_key_pic,
+ int first_active_spatial_layer_id);
+
+ bool ExplicitlyConfiguredSpatialLayers() const;
+ bool SetSvcRates(const VideoBitrateAllocation& bitrate_allocation);
+
+ // Configures which spatial layers libvpx should encode according to
+ // configuration provided by svc_controller_.
+ void EnableSpatialLayer(int sid);
+ void DisableSpatialLayer(int sid);
+ void SetActiveSpatialLayers();
+
+ void GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
+
+ // Callback function for outputting packets per spatial layer.
+ static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+ void* user_data);
+
+ void DeliverBufferedFrame(bool end_of_picture);
+
+ bool DropFrame(uint8_t spatial_idx, uint32_t rtp_timestamp);
+
+ // Determine maximum target for Intra frames
+ //
+ // Input:
+ // - optimal_buffer_size : Optimal buffer size
+ // Return Value : Max target size for Intra frames represented as
+ // percentage of the per frame bandwidth
+ uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
+
+ size_t SteadyStateSize(int sid, int tid);
+
+ void MaybeRewrapRawWithFormat(vpx_img_fmt fmt);
+ // Prepares `raw_` to reference image data of `buffer`, or of mapped or scaled
+ // versions of `buffer`. Returns the buffer that got referenced as a result,
+ // allowing the caller to keep a reference to it until after encoding has
+ // finished. On failure to convert the buffer, null is returned.
+ rtc::scoped_refptr<VideoFrameBuffer> PrepareBufferForProfile0(
+ rtc::scoped_refptr<VideoFrameBuffer> buffer);
+
+ const std::unique_ptr<LibvpxInterface> libvpx_;
+ EncodedImage encoded_image_;
+ CodecSpecificInfo codec_specific_;
+ EncodedImageCallback* encoded_complete_callback_;
+ VideoCodec codec_;
+ const VP9Profile profile_;
+ bool inited_;
+ int64_t timestamp_;
+ uint32_t rc_max_intra_target_;
+ vpx_codec_ctx_t* encoder_;
+ vpx_codec_enc_cfg_t* config_;
+ vpx_image_t* raw_;
+ vpx_svc_extra_cfg_t svc_params_;
+ const VideoFrame* input_image_;
+ GofInfoVP9 gof_; // Contains each frame's temporal information for
+ // non-flexible mode.
+ bool force_key_frame_;
+ size_t pics_since_key_;
+ uint8_t num_temporal_layers_;
+ uint8_t num_spatial_layers_; // Number of configured SLs
+ uint8_t num_active_spatial_layers_; // Number of actively encoded SLs
+ uint8_t first_active_layer_;
+ bool layer_deactivation_requires_key_frame_;
+ bool is_svc_;
+ InterLayerPredMode inter_layer_pred_;
+ bool external_ref_control_;
+ const bool trusted_rate_controller_;
+ bool layer_buffering_;
+ const bool full_superframe_drop_;
+ vpx_svc_frame_drop_t svc_drop_frame_;
+ bool first_frame_in_picture_;
+ VideoBitrateAllocation current_bitrate_allocation_;
+ bool ss_info_needed_;
+ bool force_all_active_layers_;
+ uint8_t num_cores_;
+
+ std::unique_ptr<ScalableVideoController> svc_controller_;
+ absl::optional<ScalabilityMode> scalability_mode_;
+ std::vector<FramerateControllerDeprecated> framerate_controller_;
+
+ // Used for flexible mode.
+ bool is_flexible_mode_;
+ struct RefFrameBuffer {
+    bool operator==(const RefFrameBuffer& o) const {
+ return pic_num == o.pic_num && spatial_layer_id == o.spatial_layer_id &&
+ temporal_layer_id == o.temporal_layer_id;
+ }
+
+ size_t pic_num = 0;
+ int spatial_layer_id = 0;
+ int temporal_layer_id = 0;
+ };
+ std::array<RefFrameBuffer, kNumVp9Buffers> ref_buf_;
+ std::vector<ScalableVideoController::LayerFrameConfig> layer_frames_;
+
+ // Variable frame-rate related fields and methods.
+ const struct VariableFramerateExperiment {
+ bool enabled;
+ // Framerate is limited to this value in steady state.
+ float framerate_limit;
+ // This qp or below is considered a steady state.
+ int steady_state_qp;
+    // Frames that are at least this percentage smaller than the ideal size
+    // for the configured bitrate are considered to be in a steady state.
+ // Number of consecutive frames with good QP and size required to detect
+ // the steady state.
+ int frames_before_steady_state;
+ } variable_framerate_experiment_;
+ static VariableFramerateExperiment ParseVariableFramerateConfig(
+ const FieldTrialsView& trials);
+ FramerateControllerDeprecated variable_framerate_controller_;
+
+ const struct QualityScalerExperiment {
+ int low_qp;
+ int high_qp;
+ bool enabled;
+ } quality_scaler_experiment_;
+ static QualityScalerExperiment ParseQualityScalerConfig(
+ const FieldTrialsView& trials);
+ const bool external_ref_ctrl_;
+
+  // Flags that can affect the speed vs quality tradeoff, configurable per
+  // resolution range.
+  struct PerformanceFlags {
+    // If false, a lookup is made in `settings_by_resolution` based on the
+    // highest currently active resolution, and the overall speed is then set
+    // to the `base_layer_speed` matching that entry.
+    // If true, each active resolution has its speed and deblock_mode set based
+    // on its own resolution, and the high-layer speed is used for non-base
+    // temporal layer frames.
+ bool use_per_layer_speed = false;
+
+ struct ParameterSet {
+ int base_layer_speed = -1; // Speed setting for TL0.
+ int high_layer_speed = -1; // Speed setting for TL1-TL3.
+ // 0 = deblock all temporal layers (TL)
+ // 1 = disable deblock for top-most TL
+ // 2 = disable deblock for all TLs
+ int deblock_mode = 0;
+ bool allow_denoising = true;
+ };
+ // Map from min pixel count to settings for that resolution and above.
+ // E.g. if you want some settings A if below wvga (640x360) and some other
+ // setting B at wvga and above, you'd use map {{0, A}, {230400, B}}.
+ flat_map<int, ParameterSet> settings_by_resolution;
+ };
+ // Performance flags, ordered by `min_pixel_count`.
+ const PerformanceFlags performance_flags_;
+  // Cached per-spatial-layer settings, where index i maps to the resolution
+  // specified in `codec_.spatialLayers[i]`.
+ std::vector<PerformanceFlags::ParameterSet>
+ performance_flags_by_spatial_index_;
+ void UpdatePerformanceFlags();
+ static PerformanceFlags ParsePerformanceFlagsFromTrials(
+ const FieldTrialsView& trials);
+ static PerformanceFlags GetDefaultPerformanceFlags();
+
+ int num_steady_state_frames_;
+ // Only set config when this flag is set.
+ bool config_changed_;
+
+ const LibvpxVp9EncoderInfoSettings encoder_info_override_;
+};
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
new file mode 100644
index 0000000000..3a32a43622
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "media/base/video_common.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+const size_t kMinVp9SvcBitrateKbps = 30;
+
+const size_t kMaxNumLayersForScreenSharing = 3;
+const float kMaxScreenSharingLayerFramerateFps[] = {5.0, 10.0, 30.0};
+const size_t kMinScreenSharingLayerBitrateKbps[] = {30, 200, 500};
+const size_t kTargetScreenSharingLayerBitrateKbps[] = {150, 350, 950};
+const size_t kMaxScreenSharingLayerBitrateKbps[] = {250, 500, 950};
+
+// Gets the maximum number of spatial layers that fit the given resolution.
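+// For example (illustrative, assuming minimum side lengths of 240 and 135):
+// a 1280x720 input gives floor(1 + log2(1280 / 240)) = 3 and
+// floor(1 + log2(720 / 135)) = 3, so at most 3 spatial layers fit.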
+size_t GetLimitedNumSpatialLayers(size_t width, size_t height) {
+ const bool is_landscape = width >= height;
+ const size_t min_width = is_landscape ? kMinVp9SpatialLayerLongSideLength
+ : kMinVp9SpatialLayerShortSideLength;
+ const size_t min_height = is_landscape ? kMinVp9SpatialLayerShortSideLength
+ : kMinVp9SpatialLayerLongSideLength;
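+ // Each additional spatial layer halves the resolution, so per axis the
+ // number of layers that fit is 1 + floor(log2(length / min_length)),
+ // clamped to at least 1; the more constrained axis limits the result.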
+ const size_t num_layers_fit_horz = static_cast<size_t>(
+ std::floor(1 + std::max(0.0f, std::log2(1.0f * width / min_width))));
+ const size_t num_layers_fit_vert = static_cast<size_t>(
+ std::floor(1 + std::max(0.0f, std::log2(1.0f * height / min_height))));
+ return std::min(num_layers_fit_horz, num_layers_fit_vert);
+}
+} // namespace
+
+std::vector<SpatialLayer> ConfigureSvcScreenSharing(size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t num_spatial_layers) {
+ num_spatial_layers =
+ std::min(num_spatial_layers, kMaxNumLayersForScreenSharing);
+ std::vector<SpatialLayer> spatial_layers;
+
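+ // All screen-sharing layers use the full input resolution; they differ
+ // only in max framerate and in bitrate limits.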
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ SpatialLayer spatial_layer = {0};
+ spatial_layer.width = input_width;
+ spatial_layer.height = input_height;
+ spatial_layer.maxFramerate =
+ std::min(kMaxScreenSharingLayerFramerateFps[sl_idx], max_framerate_fps);
+ spatial_layer.numberOfTemporalLayers = 1;
+ spatial_layer.minBitrate =
+ static_cast<int>(kMinScreenSharingLayerBitrateKbps[sl_idx]);
+ spatial_layer.maxBitrate =
+ static_cast<int>(kMaxScreenSharingLayerBitrateKbps[sl_idx]);
+ spatial_layer.targetBitrate =
+ static_cast<int>(kTargetScreenSharingLayerBitrateKbps[sl_idx]);
+ spatial_layer.active = true;
+ spatial_layers.push_back(spatial_layer);
+ }
+
+ return spatial_layers;
+}
+
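+// Configures spatial layers for normal (camera) video: layer resolutions are
+// derived from the input resolution by per-layer scaling, and bitrate limits
+// are derived from each layer's pixel count (see the formulas below).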
+std::vector<SpatialLayer> ConfigureSvcNormalVideo(
+ size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t first_active_layer,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ absl::optional<ScalableVideoController::StreamLayersConfig> config) {
+ RTC_DCHECK_LT(first_active_layer, num_spatial_layers);
+
+ // Limit number of layers for given resolution.
+ size_t limited_num_spatial_layers =
+ GetLimitedNumSpatialLayers(input_width, input_height);
+ if (limited_num_spatial_layers < num_spatial_layers) {
+ RTC_LOG(LS_WARNING) << "Reducing number of spatial layers from "
+ << num_spatial_layers << " to "
+ << limited_num_spatial_layers
+ << " due to low input resolution.";
+ num_spatial_layers = limited_num_spatial_layers;
+ }
+
+ // First active layer must be configured.
+ num_spatial_layers = std::max(num_spatial_layers, first_active_layer + 1);
+
+ // Ensure the top layer's dimensions satisfy the divisibility required by
+ // the downscaled lower layers.
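+ // E.g. with three layers and first_active_layer == 0, each lower layer is
+ // downscaled by a power of two, so width and height must be divisible by 4.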
+ int required_divisibility = 1 << (num_spatial_layers - first_active_layer - 1);
+ if (config) {
+ required_divisibility = 1;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ required_divisibility = cricket::LeastCommonMultiple(
+ required_divisibility, config->scaling_factor_den[sl_idx]);
+ }
+ }
+ input_width = input_width - input_width % required_divisibility;
+ input_height = input_height - input_height % required_divisibility;
+
+ std::vector<SpatialLayer> spatial_layers;
+ for (size_t sl_idx = first_active_layer; sl_idx < num_spatial_layers;
+ ++sl_idx) {
+ SpatialLayer spatial_layer = {0};
+ spatial_layer.width = input_width >> (num_spatial_layers - sl_idx - 1);
+ spatial_layer.height = input_height >> (num_spatial_layers - sl_idx - 1);
+ spatial_layer.maxFramerate = max_framerate_fps;
+ spatial_layer.numberOfTemporalLayers = num_temporal_layers;
+ spatial_layer.active = true;
+
+ if (config) {
+ spatial_layer.width = input_width * config->scaling_factor_num[sl_idx] /
+ config->scaling_factor_den[sl_idx];
+ spatial_layer.height = input_height * config->scaling_factor_num[sl_idx] /
+ config->scaling_factor_den[sl_idx];
+ }
+
+ // minBitrate and maxBitrate formulas were derived from
+ // subjective-quality data to determine bit rates below which video
+ // quality is unacceptable and above which additional bits do not provide
+ // benefit. The formulas express rate in units of kbps.
+
+ // TODO(ssilkin): Add to the comment the PSNR/SSIM obtained when encoding
+ // video at the min/max bitrates specified by those formulas.
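+ // E.g. for a 1280x720 layer (921600 pixels) these formulas give
+ // minBitrate ~= 481 kbps and maxBitrate ~= 1524 kbps.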
+ const size_t num_pixels = spatial_layer.width * spatial_layer.height;
+ int min_bitrate =
+ static_cast<int>((600. * std::sqrt(num_pixels) - 95000.) / 1000.);
+ min_bitrate = std::max(min_bitrate, 0);
+ spatial_layer.minBitrate =
+ std::max(static_cast<size_t>(min_bitrate), kMinVp9SvcBitrateKbps);
+ spatial_layer.maxBitrate =
+ static_cast<int>((1.6 * num_pixels + 50 * 1000) / 1000);
+ spatial_layer.targetBitrate =
+ (spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
+ spatial_layers.push_back(spatial_layer);
+ }
+
+ // A workaround for the situation when a single HD layer is left with a
+ // minBitrate of about 500 kbps. That would mean at least 500 kbps is always
+ // allocated to video regardless of how low the actual BWE is.
+ // Also boost maxBitrate for the first layer to account for the lost ability
+ // to predict from previous layers.
+ if (first_active_layer > 0) {
+ spatial_layers[0].minBitrate = kMinVp9SvcBitrateKbps;
+ // TODO(ilnik): tune this value or come up with a different formula to
+ // ensure that all singlecast configurations look good and not too much
+ // bitrate is added.
+ spatial_layers[0].maxBitrate *= 1.1;
+ }
+
+ return spatial_layers;
+}
+
+// Uses scalability mode to configure spatial layers.
+std::vector<SpatialLayer> GetVp9SvcConfig(VideoCodec& codec) {
+ RTC_DCHECK_EQ(codec.codecType, kVideoCodecVP9);
+
+ absl::optional<ScalabilityMode> scalability_mode = codec.GetScalabilityMode();
+ RTC_DCHECK(scalability_mode.has_value());
+
+ // Limit number of spatial layers for given resolution.
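+ // E.g. a 480x270 input cannot fit three 2:1-scaled layers, so L3T3_KEY is
+ // reduced to L2T3_KEY (see the NumSpatialLayersLimited* unittests).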
+ int limited_num_spatial_layers =
+ GetLimitedNumSpatialLayers(codec.width, codec.height);
+ if (limited_num_spatial_layers <
+ ScalabilityModeToNumSpatialLayers(*scalability_mode)) {
+ ScalabilityMode limited_scalability_mode =
+ LimitNumSpatialLayers(*scalability_mode, limited_num_spatial_layers);
+ RTC_LOG(LS_WARNING)
+ << "Reducing number of spatial layers due to low input resolution: "
+ << ScalabilityModeToString(*scalability_mode) << " to "
+ << ScalabilityModeToString(limited_scalability_mode);
+ scalability_mode = limited_scalability_mode;
+ codec.SetScalabilityMode(limited_scalability_mode);
+ }
+
+ absl::optional<ScalableVideoController::StreamLayersConfig> info =
+ ScalabilityStructureConfig(*scalability_mode);
+ if (!info.has_value()) {
+ RTC_LOG(LS_WARNING) << "Failed to create structure "
+ << ScalabilityModeToString(*scalability_mode);
+ return {};
+ }
+
+ // TODO(bugs.webrtc.org/11607): Add support for screensharing.
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(codec.width, codec.height, codec.maxFramerate,
+ /*first_active_layer=*/0, info->num_spatial_layers,
+ info->num_temporal_layers, /*is_screen_sharing=*/false,
+ codec.GetScalabilityMode() ? info : absl::nullopt);
+ RTC_DCHECK(!spatial_layers.empty());
+
+ // Use codec bitrate limits if spatial layering is not requested.
+ if (info->num_spatial_layers == 1) {
+ spatial_layers.back().minBitrate = codec.minBitrate;
+ spatial_layers.back().targetBitrate = codec.maxBitrate;
+ spatial_layers.back().maxBitrate = codec.maxBitrate;
+ }
+
+ return spatial_layers;
+}
+
+std::vector<SpatialLayer> GetSvcConfig(
+ size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t first_active_layer,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool is_screen_sharing,
+ absl::optional<ScalableVideoController::StreamLayersConfig> config) {
+ RTC_DCHECK_GT(input_width, 0);
+ RTC_DCHECK_GT(input_height, 0);
+ RTC_DCHECK_GT(num_spatial_layers, 0);
+ RTC_DCHECK_GT(num_temporal_layers, 0);
+
+ if (is_screen_sharing) {
+ return ConfigureSvcScreenSharing(input_width, input_height,
+ max_framerate_fps, num_spatial_layers);
+ } else {
+ return ConfigureSvcNormalVideo(input_width, input_height, max_framerate_fps,
+ first_active_layer, num_spatial_layers,
+ num_temporal_layers, config);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h
new file mode 100644
index 0000000000..adeaf0f161
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_SVC_CONFIG_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_SVC_CONFIG_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/video_codecs/spatial_layer.h"
+#include "api/video_codecs/video_codec.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Uses scalability mode to configure spatial layers.
+std::vector<SpatialLayer> GetVp9SvcConfig(VideoCodec& video_codec);
+
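+// Computes per-layer resolution, max framerate and bitrate limits for a VP9
+// SVC configuration. When `config` is set, its scaling factors determine the
+// layer resolutions; otherwise each layer halves the one above it.
+// Example: GetSvcConfig(1280, 720, 30.0, /*first_active_layer=*/0,
+//                       /*num_spatial_layers=*/3, /*num_temporal_layers=*/3,
+//                       /*is_screen_sharing=*/false)
+// yields layers at 320x180, 640x360 and 1280x720.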
+std::vector<SpatialLayer> GetSvcConfig(
+ size_t input_width,
+ size_t input_height,
+ float max_framerate_fps,
+ size_t first_active_layer,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool is_screen_sharing,
+ absl::optional<ScalableVideoController::StreamLayersConfig> config =
+ absl::nullopt);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_SVC_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
new file mode 100644
index 0000000000..762fd39287
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+
+#include <cstddef>
+#include <vector>
+
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::ElementsAre;
+using ::testing::Field;
+
+namespace webrtc {
+TEST(SvcConfig, NumSpatialLayers) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 0;
+ const size_t num_spatial_layers = 2;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+
+ EXPECT_EQ(spatial_layers.size(), num_spatial_layers);
+}
+
+TEST(SvcConfig, NumSpatialLayersPortrait) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 0;
+ const size_t num_spatial_layers = 2;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+
+ EXPECT_EQ(spatial_layers.size(), num_spatial_layers);
+}
+
+TEST(SvcConfig, NumSpatialLayersWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 960;
+ codec.height = 540;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3_KEY);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::height, 135),
+ Field(&SpatialLayer::height, 270),
+ Field(&SpatialLayer::height, 540)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 3),
+ Field(&SpatialLayer::numberOfTemporalLayers, 3),
+ Field(&SpatialLayer::numberOfTemporalLayers, 3)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL3T3_KEY);
+}
+
+TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 480;
+ codec.height = 270;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3_KEY);
+
+ // Scalability mode updated.
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::height, 135),
+ Field(&SpatialLayer::height, 270)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 3),
+ Field(&SpatialLayer::numberOfTemporalLayers, 3)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL2T3_KEY);
+}
+
+TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityModePortrait) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 270;
+ codec.height = 480;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T1);
+
+ // Scalability mode updated.
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::width, 135),
+ Field(&SpatialLayer::width, 270)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 1),
+ Field(&SpatialLayer::numberOfTemporalLayers, 1)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL2T1);
+}
+
+TEST(SvcConfig, NumSpatialLayersWithScalabilityModeResolutionRatio1_5) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 270;
+ codec.height = 480;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1h); // 1.5:1
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::width, 180),
+ Field(&SpatialLayer::width, 270)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 1),
+ Field(&SpatialLayer::numberOfTemporalLayers, 1)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL2T1h);
+}
+
+TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityModeResolutionRatio1_5) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 320;
+ codec.height = 180;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T1h); // 1.5:1
+
+ // Scalability mode updated.
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::width, 320)));
+ EXPECT_THAT(spatial_layers,
+ ElementsAre(Field(&SpatialLayer::numberOfTemporalLayers, 1)));
+ EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL1T1);
+}
+
+TEST(SvcConfig, AlwaysSendsAtLeastOneLayer) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 5;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength, kMinVp9SpatialLayerShortSideLength, 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+ EXPECT_EQ(spatial_layers.size(), 1u);
+ EXPECT_EQ(spatial_layers.back().width, kMinVp9SpatialLayerLongSideLength);
+}
+
+TEST(SvcConfig, AlwaysSendsAtLeastOneLayerPortrait) {
+ const size_t max_num_spatial_layers = 6;
+ const size_t first_active_layer = 5;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerShortSideLength, kMinVp9SpatialLayerLongSideLength, 30,
+ first_active_layer, max_num_spatial_layers, 1, false);
+ EXPECT_EQ(spatial_layers.size(), 1u);
+ EXPECT_EQ(spatial_layers.back().width, kMinVp9SpatialLayerShortSideLength);
+}
+
+TEST(SvcConfig, EnforcesMinimalRequiredParity) {
+ const size_t max_num_spatial_layers = 3;
+ const size_t kOddSize = 1023;
+
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(kOddSize, kOddSize, 30,
+ /*first_active_layer=*/1, max_num_spatial_layers, 1, false);
+ // Since there are 2 layers total (1, 2), divisibility by 2 is required.
+ EXPECT_EQ(spatial_layers.back().width, kOddSize - 1);
+ EXPECT_EQ(spatial_layers.back().height, kOddSize - 1);
+
+ spatial_layers =
+ GetSvcConfig(kOddSize, kOddSize, 30,
+ /*first_active_layer=*/0, max_num_spatial_layers, 1, false);
+ // Since there are 3 layers total (0, 1, 2), divisibility by 4 is required.
+ EXPECT_EQ(spatial_layers.back().width, kOddSize - 3);
+ EXPECT_EQ(spatial_layers.back().height, kOddSize - 3);
+
+ spatial_layers =
+ GetSvcConfig(kOddSize, kOddSize, 30,
+ /*first_active_layer=*/2, max_num_spatial_layers, 1, false);
+ // Since there is only 1 layer active (2), divisibility by 1 is required.
+ EXPECT_EQ(spatial_layers.back().width, kOddSize);
+ EXPECT_EQ(spatial_layers.back().height, kOddSize);
+}
+
+TEST(SvcConfig, EnforcesMinimalRequiredParityWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 1023;
+ codec.height = 1023;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T1);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, // Divisibility by 4 required.
+ ElementsAre(Field(&SpatialLayer::width, 255),
+ Field(&SpatialLayer::width, 510),
+ Field(&SpatialLayer::width, 1020)));
+
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1);
+ spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, // Divisibility by 2 required.
+ ElementsAre(Field(&SpatialLayer::width, 511),
+ Field(&SpatialLayer::width, 1022)));
+
+ codec.SetScalabilityMode(ScalabilityMode::kL1T1);
+ spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, // Divisibility by 1 required.
+ ElementsAre(Field(&SpatialLayer::width, 1023)));
+}
+
+TEST(SvcConfig, EnforcesMinimalRequiredParityWithScalabilityModeResRatio1_5) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 1280;
+ codec.height = 1280;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T1h); // 1.5:1
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, // Divisibility by 3 required.
+ ElementsAre(Field(&SpatialLayer::width, 852),
+ Field(&SpatialLayer::width, 1278)));
+}
+
+TEST(SvcConfig, SkipsInactiveLayers) {
+ const size_t num_spatial_layers = 4;
+ const size_t first_active_layer = 2;
+
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, num_spatial_layers, 1, false);
+ EXPECT_EQ(spatial_layers.size(), 2u);
+ EXPECT_EQ(spatial_layers.back().width,
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1));
+}
+
+TEST(SvcConfig, BitrateThresholds) {
+ const size_t first_active_layer = 0;
+ const size_t num_spatial_layers = 3;
+ std::vector<SpatialLayer> spatial_layers = GetSvcConfig(
+ kMinVp9SpatialLayerLongSideLength << (num_spatial_layers - 1),
+ kMinVp9SpatialLayerShortSideLength << (num_spatial_layers - 1), 30,
+ first_active_layer, num_spatial_layers, 1, false);
+
+ EXPECT_EQ(spatial_layers.size(), num_spatial_layers);
+
+ for (const SpatialLayer& layer : spatial_layers) {
+ EXPECT_LE(layer.minBitrate, layer.maxBitrate);
+ EXPECT_LE(layer.minBitrate, layer.targetBitrate);
+ EXPECT_LE(layer.targetBitrate, layer.maxBitrate);
+ }
+}
+
+TEST(SvcConfig, BitrateThresholdsWithScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 960;
+ codec.height = 540;
+ codec.SetScalabilityMode(ScalabilityMode::kS3T3);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_THAT(spatial_layers, ElementsAre(Field(&SpatialLayer::height, 135),
+ Field(&SpatialLayer::height, 270),
+ Field(&SpatialLayer::height, 540)));
+
+ for (const SpatialLayer& layer : spatial_layers) {
+ EXPECT_LE(layer.minBitrate, layer.maxBitrate);
+ EXPECT_LE(layer.minBitrate, layer.targetBitrate);
+ EXPECT_LE(layer.targetBitrate, layer.maxBitrate);
+ }
+}
+
+TEST(SvcConfig, ScreenSharing) {
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(1920, 1080, 30, 1, 3, 3, true);
+
+ EXPECT_EQ(spatial_layers.size(), 3UL);
+
+ for (size_t i = 0; i < 3; ++i) {
+ const SpatialLayer& layer = spatial_layers[i];
+ EXPECT_EQ(layer.width, 1920);
+ EXPECT_EQ(layer.height, 1080);
+ EXPECT_EQ(layer.maxFramerate, (i < 1) ? 5 : (i < 2 ? 10 : 30));
+ EXPECT_EQ(layer.numberOfTemporalLayers, 1);
+ EXPECT_LE(layer.minBitrate, layer.maxBitrate);
+ EXPECT_LE(layer.minBitrate, layer.targetBitrate);
+ EXPECT_LE(layer.targetBitrate, layer.maxBitrate);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
new file mode 100644
index 0000000000..b6293a342e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -0,0 +1,2450 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "absl/memory/memory.h"
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/video/color_space.h"
+#include "api/video/i420_buffer.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/codecs/interface/libvpx_interface.h"
+#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h"
+#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/explicit_key_value_config.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mappable_native_buffer.h"
+#include "test/video_codec_settings.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::A;
+using ::testing::AllOf;
+using ::testing::An;
+using ::testing::AnyNumber;
+using ::testing::ByRef;
+using ::testing::DoAll;
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::SafeMatcherCast;
+using ::testing::SaveArgPointee;
+using ::testing::SetArgPointee;
+using ::testing::SizeIs;
+using ::testing::TypedEq;
+using ::testing::UnorderedElementsAreArray;
+using ::testing::WithArg;
+using EncoderInfo = webrtc::VideoEncoder::EncoderInfo;
+using FramerateFractions =
+ absl::InlinedVector<uint8_t, webrtc::kMaxTemporalStreams>;
+
+constexpr size_t kWidth = 1280;
+constexpr size_t kHeight = 720;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities,
+ /*number_of_cores=*/1,
+ /*max_payload_size=*/0);
+
+VideoCodec DefaultCodecSettings() {
+ VideoCodec codec_settings;
+ webrtc::test::CodecSettings(kVideoCodecVP9, &codec_settings);
+ codec_settings.width = kWidth;
+ codec_settings.height = kHeight;
+ codec_settings.VP9()->numberOfTemporalLayers = 1;
+ codec_settings.VP9()->numberOfSpatialLayers = 1;
+ return codec_settings;
+}
+
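+// Fills in the VP9 SVC fields of `codec_settings` and copies the spatial
+// layer configuration produced by GetSvcConfig() into its `spatialLayers`.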
+void ConfigureSvc(VideoCodec& codec_settings,
+ int num_spatial_layers,
+ int num_temporal_layers = 1) {
+ codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers;
+ codec_settings.SetFrameDropEnabled(false);
+
+ std::vector<SpatialLayer> layers = GetSvcConfig(
+ codec_settings.width, codec_settings.height, codec_settings.maxFramerate,
+ /*first_active_layer=*/0, num_spatial_layers, num_temporal_layers, false);
+ for (size_t i = 0; i < layers.size(); ++i) {
+ codec_settings.spatialLayers[i] = layers[i];
+ }
+}
+
+} // namespace
+
+class TestVp9Impl : public VideoCodecUnitTest {
+ protected:
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ return VP9Encoder::Create();
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return VP9Decoder::Create();
+ }
+
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecVP9, codec_settings);
+ codec_settings->width = kWidth;
+ codec_settings->height = kHeight;
+ codec_settings->VP9()->numberOfTemporalLayers = 1;
+ codec_settings->VP9()->numberOfSpatialLayers = 1;
+ }
+};
+
+class TestVp9ImplForPixelFormat
+ : public TestVp9Impl,
+ public ::testing::WithParamInterface<
+ test::FrameGeneratorInterface::OutputType> {
+ protected:
+ void SetUp() override {
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, GetParam(), absl::optional<int>());
+ TestVp9Impl::SetUp();
+ }
+};
+
+// Disabled on iOS as flaky, see https://crbug.com/webrtc/7057
+#if defined(WEBRTC_IOS)
+TEST_P(TestVp9ImplForPixelFormat, DISABLED_EncodeDecode) {
+#else
+TEST_P(TestVp9ImplForPixelFormat, EncodeDecode) {
+#endif
+ VideoFrame input_frame = NextInputFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
+
+ const ColorSpace color_space = *decoded_frame->color_space();
+ EXPECT_EQ(ColorSpace::PrimaryID::kUnspecified, color_space.primaries());
+ EXPECT_EQ(ColorSpace::TransferID::kUnspecified, color_space.transfer());
+ EXPECT_EQ(ColorSpace::MatrixID::kUnspecified, color_space.matrix());
+ EXPECT_EQ(ColorSpace::RangeID::kLimited, color_space.range());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_horizontal());
+ EXPECT_EQ(ColorSpace::ChromaSiting::kUnspecified,
+ color_space.chroma_siting_vertical());
+}
+
+TEST_P(TestVp9ImplForPixelFormat, EncodeNativeBuffer) {
+ VideoFrame input_frame = NextInputFrame();
+ // Replace the input frame with a fake native buffer of the same size and
+ // underlying pixel format. Do not allow ToI420() for non-I420 buffers,
+ // ensuring zero-conversion.
+ input_frame = test::CreateMappableNativeFrame(
+ input_frame.ntp_time_ms(), input_frame.video_frame_buffer()->type(),
+ input_frame.width(), input_frame.height());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // After encoding, we would expect a single mapping to have happened.
+ rtc::scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
+ test::GetMappableNativeBufferFromVideoFrame(input_frame);
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_buffers =
+ mappable_buffer->GetMappedFramedBuffers();
+ ASSERT_EQ(mapped_buffers.size(), 1u);
+ EXPECT_EQ(mapped_buffers[0]->type(), mappable_buffer->mappable_type());
+ EXPECT_EQ(mapped_buffers[0]->width(), input_frame.width());
+ EXPECT_EQ(mapped_buffers[0]->height(), input_frame.height());
+ EXPECT_FALSE(mappable_buffer->DidConvertToI420());
+}
+
+TEST_P(TestVp9ImplForPixelFormat, DecodedColorSpaceFromBitstream) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Encoded frame without explicit color space information.
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ // Color space present from encoded bitstream.
+ ASSERT_TRUE(decoded_frame->color_space());
+ // No HDR metadata present.
+ EXPECT_FALSE(decoded_frame->color_space()->hdr_metadata());
+}
+
+TEST_P(TestVp9ImplForPixelFormat, DecodedQpEqualsEncodedQp) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // First frame should be a key frame.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+ ASSERT_TRUE(decoded_qp);
+ EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
+}
+
+TEST_F(TestVp9Impl, SwitchInputPixelFormatsWithoutReconfigure) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Change the input frame type from I420 to NV12; encoding should still work.
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
+ absl::optional<int>());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Flip back to I420; encoding should still work.
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420,
+ absl::optional<int>());
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+}
+
+TEST(Vp9ImplTest, ParserQpEqualsEncodedQp) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ encoder->InitEncode(&codec_settings, kSettings);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(1)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+ ASSERT_THAT(frames, SizeIs(1));
+ const auto& encoded_frame = frames.front().encoded_image;
+ int qp = 0;
+ ASSERT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
+ EXPECT_EQ(encoded_frame.qp_, qp);
+}
+
+TEST(Vp9ImplTest, EncodeAttachesTemplateStructureWithSvcController) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(2)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(2));
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+ EXPECT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+
+ EXPECT_FALSE(frames[1].codec_specific_info.template_structure);
+ EXPECT_TRUE(frames[1].codec_specific_info.generic_frame_info);
+}
+
+TEST(Vp9ImplTest, EncoderWith2TemporalLayers) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfTemporalLayers = 2;
+ // Tl0PidIdx is only used in non-flexible mode.
+ codec_settings.VP9()->flexibleMode = false;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(4)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(4));
+ EXPECT_EQ(frames[0].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+}
+
+TEST(Vp9ImplTest, EncodeTemporalLayersWithSvcController) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfTemporalLayers = 2;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(4)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(4));
+ EXPECT_EQ(frames[0].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.codecSpecific.VP9.temporal_idx, 1);
+ // Verify the codec-agnostic part.
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[1].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[2].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[3].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->temporal_id, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.generic_frame_info->temporal_id, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.generic_frame_info->temporal_id, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.generic_frame_info->temporal_id, 1);
+}
+
+TEST(Vp9ImplTest, EncoderWith2SpatialLayers) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfSpatialLayers = 2;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(1)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].encoded_image.SpatialIndex(), 0);
+ EXPECT_EQ(frames[1].encoded_image.SpatialIndex(), 1);
+}
+
+TEST(Vp9ImplTest, EncodeSpatialLayersWithSvcController) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->numberOfSpatialLayers = 2;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(2)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+
+ ASSERT_THAT(frames, SizeIs(4));
+ EXPECT_EQ(frames[0].encoded_image.SpatialIndex(), 0);
+ EXPECT_EQ(frames[1].encoded_image.SpatialIndex(), 1);
+ EXPECT_EQ(frames[2].encoded_image.SpatialIndex(), 0);
+ EXPECT_EQ(frames[3].encoded_image.SpatialIndex(), 1);
+ // Verify the codec-agnostic part.
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[1].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[2].codec_specific_info.generic_frame_info);
+ ASSERT_TRUE(frames[3].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 0);
+ EXPECT_EQ(frames[1].codec_specific_info.generic_frame_info->spatial_id, 1);
+ EXPECT_EQ(frames[2].codec_specific_info.generic_frame_info->spatial_id, 0);
+ EXPECT_EQ(frames[3].codec_specific_info.generic_frame_info->spatial_id, 1);
+}
+
+TEST_F(TestVp9Impl, EncoderExplicitLayering) {
+ // Override default settings.
+ codec_settings_.VP9()->numberOfTemporalLayers = 1;
+ codec_settings_.VP9()->numberOfSpatialLayers = 2;
+
+ codec_settings_.width = 960;
+ codec_settings_.height = 540;
+ codec_settings_.spatialLayers[0].minBitrate = 200;
+ codec_settings_.spatialLayers[0].maxBitrate = 500;
+ codec_settings_.spatialLayers[0].targetBitrate =
+ (codec_settings_.spatialLayers[0].minBitrate +
+ codec_settings_.spatialLayers[0].maxBitrate) /
+ 2;
+ codec_settings_.spatialLayers[0].active = true;
+
+ codec_settings_.spatialLayers[1].minBitrate = 400;
+ codec_settings_.spatialLayers[1].maxBitrate = 1500;
+ codec_settings_.spatialLayers[1].targetBitrate =
+ (codec_settings_.spatialLayers[1].minBitrate +
+ codec_settings_.spatialLayers[1].maxBitrate) /
+ 2;
+ codec_settings_.spatialLayers[1].active = true;
+
+ codec_settings_.spatialLayers[0].width = codec_settings_.width / 2;
+ codec_settings_.spatialLayers[0].height = codec_settings_.height / 2;
+ codec_settings_.spatialLayers[0].maxFramerate = codec_settings_.maxFramerate;
+ codec_settings_.spatialLayers[1].width = codec_settings_.width;
+ codec_settings_.spatialLayers[1].height = codec_settings_.height;
+ codec_settings_.spatialLayers[1].maxFramerate = codec_settings_.maxFramerate;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Ensure it fails if scaling factors in horz/vert dimensions are different.
+ codec_settings_.spatialLayers[0].width = codec_settings_.width;
+ codec_settings_.spatialLayers[0].height = codec_settings_.height / 2;
+ codec_settings_.spatialLayers[1].width = codec_settings_.width;
+ codec_settings_.spatialLayers[1].height = codec_settings_.height;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_PARAMETER,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Ensure it fails if scaling factor is not power of two.
+ codec_settings_.spatialLayers[0].width = codec_settings_.width / 3;
+ codec_settings_.spatialLayers[0].height = codec_settings_.height / 3;
+ codec_settings_.spatialLayers[1].width = codec_settings_.width;
+ codec_settings_.spatialLayers[1].height = codec_settings_.height;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERR_PARAMETER,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+}
+
+TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
+ // Configure the encoder to produce N spatial layers. Encode frames of
+ // layer 0, then enable layer 1 and encode more frames, and so on up to
+ // layer N-1. Then disable the layers one by one in the same way.
+ // Note: the bit rate allocation is high to avoid frame dropping due to
+ // rate control, so the encoder should always produce a frame. A dropped
+ // frame indicates a problem and the test will fail.
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(true);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx + 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+ }
+
+ for (size_t i = 0; i < num_spatial_layers - 1; ++i) {
+ const size_t sl_idx = num_spatial_layers - i - 1;
+ bitrate_allocation.SetBitrate(sl_idx, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+ }
+}
+
+TEST(Vp9ImplTest, EnableDisableSpatialLayersWithSvcController) {
+ const int num_spatial_layers = 3;
+ // Configure the encoder to produce 3 spatial layers. Encode frames of
+ // layer 0, then enable layer 1 and encode more frames, and so on.
+ // Then disable the layers one by one in the same way.
+ // Note: the bit rate allocation is high to avoid frame dropping due to
+ // rate control, so the encoder should always produce a frame. A dropped
+ // frame indicates a problem and the test will fail.
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ConfigureSvc(codec_settings, num_spatial_layers);
+ codec_settings.SetFrameDropEnabled(true);
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ EncodedVideoFrameProducer producer(*encoder);
+ producer.SetResolution({kWidth, kHeight});
+
+ // Encode a key frame to validate all other frames are delta frames.
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ producer.SetNumInputFrames(1).Encode();
+ ASSERT_THAT(frames, Not(IsEmpty()));
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+
+ const size_t num_frames_to_encode = 5;
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ // With (sl_idx+1) spatial layers expect (sl_idx+1) frames per input frame.
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * (sl_idx + 1)));
+ for (size_t i = 0; i < frames.size(); ++i) {
+ EXPECT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ }
+ }
+
+ for (int sl_idx = num_spatial_layers - 1; sl_idx > 0; --sl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, 0, 0);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ // With `sl_idx` spatial layer disabled, there are `sl_idx` spatial layers
+ // left.
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * sl_idx));
+ for (size_t i = 0; i < frames.size(); ++i) {
+ EXPECT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ }
+ }
+}
+
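+// Matches an encoded frame whose generic_frame_info reports the given
+// (spatial_id, temporal_id) pair.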
+MATCHER_P2(GenericLayerIs, spatial_id, temporal_id, "") {
+ if (arg.codec_specific_info.generic_frame_info == absl::nullopt) {
+ *result_listener << " miss generic_frame_info";
+ return false;
+ }
+ const auto& layer = *arg.codec_specific_info.generic_frame_info;
+ if (layer.spatial_id != spatial_id || layer.temporal_id != temporal_id) {
+ *result_listener << " frame from layer (" << layer.spatial_id << ", "
+ << layer.temporal_id << ")";
+ return false;
+ }
+ return true;
+}
+
+TEST(Vp9ImplTest, SpatialUpswitchNotAtGOFBoundary) {
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ConfigureSvc(codec_settings, /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3);
+ codec_settings.SetFrameDropEnabled(true);
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ EncodedVideoFrameProducer producer(*encoder);
+ producer.SetResolution({kWidth, kHeight});
+
+ // Disable all but spatial layer 0.
+ VideoBitrateAllocation bitrate_allocation;
+ int layer_bitrate_bps = codec_settings.spatialLayers[0].targetBitrate * 1000;
+ bitrate_allocation.SetBitrate(0, 0, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(0, 1, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(0, 2, layer_bitrate_bps);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+ EXPECT_THAT(producer.SetNumInputFrames(3).Encode(),
+ ElementsAre(GenericLayerIs(0, 0), GenericLayerIs(0, 2),
+ GenericLayerIs(0, 1)));
+
+ // Upswitch to spatial_layer = 1
+ layer_bitrate_bps = codec_settings.spatialLayers[1].targetBitrate * 1000;
+ bitrate_allocation.SetBitrate(1, 0, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(1, 1, layer_bitrate_bps);
+ bitrate_allocation.SetBitrate(1, 2, layer_bitrate_bps);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+ // Expect the upswitch not to happen immediately, since there is no S1
+ // frame that the S1T2 frame can reference.
+ EXPECT_THAT(producer.SetNumInputFrames(1).Encode(),
+ ElementsAre(GenericLayerIs(0, 2)));
+ // Expect the spatial upswitch to happen now, on a T0 frame.
+ EXPECT_THAT(producer.SetNumInputFrames(1).Encode(),
+ ElementsAre(GenericLayerIs(0, 0), GenericLayerIs(1, 0)));
+}
+// TODO(bugs.webrtc.org/13442) Enable once a forward fix has landed in WebRTC.
+TEST_F(TestVp9Impl, DISABLED_DisableEnableBaseLayerTriggersKeyFrame) {
+ // Configure encoder to produce N spatial layers. Encode frames for all
+ // layers. Then disable all but the last layer. Then reenable all back again.
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-Vp9ExternalRefCtrl/Enabled/");
+ const size_t num_spatial_layers = 3;
+ const size_t num_temporal_layers = 3;
+ // Must not be a multiple of the temporal period, to exercise all code paths.
+ const size_t num_frames_to_encode = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
+ codec_settings_.mode = VideoCodecMode::kRealtimeVideo;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, tl_idx,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ }
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+
+ // Disable all but top layer.
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, tl_idx, 0);
+ }
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ bool seen_ss_data = false;
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // SS is available immediately after switching on the base temporal layer.
+ if (seen_ss_data) {
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ false);
+ } else {
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ codec_specific_info[0].codecSpecific.VP9.temporal_idx == 0);
+ seen_ss_data |=
+ codec_specific_info[0].codecSpecific.VP9.ss_data_available;
+ }
+ // Disabling layers should not generate key frames.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
+ EXPECT_TRUE(seen_ss_data);
+
+ // Force key-frame.
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // Key-frame should be produced.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+
+ // Encode some more frames.
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 1, tl_idx, codec_settings_.spatialLayers[0].targetBitrate * 1000 * 2);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 2u);
+ // SS is available immediately after switching the layer on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // Keyframe should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 1);
+ EXPECT_EQ(encoded_frame[1].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Enable the first layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 0, tl_idx, codec_settings_.spatialLayers[1].targetBitrate * 1000 * 2);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 3u);
+ // SS is available immediately after switching the layer on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // Keyframe should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ }
+}
+// TODO(bugs.webrtc.org/13442) Enable once a forward fix has landed in WebRTC.
+TEST(Vp9ImplTest,
+ DISABLED_DisableEnableBaseLayerWithSvcControllerTriggersKeyFrame) {
+ // Configure encoder to produce N spatial layers. Encode frames for all
+ // layers. Then disable all but the last layer. Then reenable all back again.
+ const size_t num_spatial_layers = 3;
+ const size_t num_temporal_layers = 3;
+ // Must not be a multiple of the temporal period, to exercise all code paths.
+ const size_t num_frames_to_encode = 5;
+
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ ConfigureSvc(codec_settings, num_spatial_layers, num_temporal_layers);
+ codec_settings.SetFrameDropEnabled(false);
+ codec_settings.VP9()->flexibleMode = false;
+ codec_settings.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
+ codec_settings.mode = VideoCodecMode::kRealtimeVideo;
+
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, tl_idx,
+ codec_settings.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ }
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ EncodedVideoFrameProducer producer(*encoder);
+ producer.SetResolution({kWidth, kHeight});
+
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * num_spatial_layers));
+
+ // Disable all but top spatial layer.
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, tl_idx, 0);
+ }
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ EXPECT_THAT(frames, SizeIs(num_frames_to_encode));
+ for (const auto& frame : frames) {
+ // Expect no key-frames generated.
+ EXPECT_FALSE(frame.codec_specific_info.template_structure);
+ ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frame.codec_specific_info.generic_frame_info->spatial_id, 2);
+ }
+
+ frames = producer.ForceKeyFrame().SetNumInputFrames(1).Encode();
+ ASSERT_THAT(frames, SizeIs(1));
+ // Key-frame should be produced.
+ EXPECT_EQ(frames[0].encoded_image._frameType, VideoFrameType::kVideoFrameKey);
+ ASSERT_TRUE(frames[0].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 2);
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode));
+ for (const auto& frame : frames) {
+ EXPECT_EQ(frame.encoded_image._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_FALSE(frame.codec_specific_info.template_structure);
+ ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frame.codec_specific_info.generic_frame_info->spatial_id, 2);
+ }
+
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 1, tl_idx, codec_settings.spatialLayers[0].targetBitrate * 1000 * 2);
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * 2));
+ EXPECT_EQ(frames[0].encoded_image._frameType, VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 1);
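+ // With the two upper layers active, the encoded frames alternate between
+ // spatial ids 1 and 2.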
+ for (size_t i = 1; i < frames.size(); ++i) {
+ EXPECT_EQ(frames[i].encoded_image._frameType,
+ VideoFrameType::kVideoFrameDelta);
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[i].codec_specific_info.generic_frame_info->spatial_id,
+ 1 + static_cast<int>(i % 2));
+ }
+
+ // Re-enable the first spatial layer.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ 0, tl_idx, codec_settings.spatialLayers[1].targetBitrate * 1000 * 2);
+ }
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings.maxFramerate));
+
+ frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * 3));
+ EXPECT_TRUE(frames[0].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 0);
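+ // With all three layers active again, the spatial ids cycle through 0, 1, 2.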
+ for (size_t i = 1; i < frames.size(); ++i) {
+ EXPECT_FALSE(frames[i].codec_specific_info.template_structure);
+ ASSERT_TRUE(frames[i].codec_specific_info.generic_frame_info);
+ EXPECT_EQ(frames[i].codec_specific_info.generic_frame_info->spatial_id,
+ static_cast<int>(i % 3));
+ }
+}
+
+TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrameForScreenshare) {
+ // Configure the encoder to produce N spatial layers. Encode frames for all
+ // layers. Then disable all but the last layer. Then re-enable all of them.
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 * 2);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
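+ // SS data is expected only with the very first encoded frame.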
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ }
+
+ // Disable all but the top layer.
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ bitrate_allocation.SetBitrate(sl_idx, 0, 0);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // SS data is available immediately after switching a layer off.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // No key frames are generated when disabling layers.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Force key-frame.
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // Key-frame should be produced.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
+
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 2u);
+ // SS data is available immediately after switching a layer on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // A key frame should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 1);
+ EXPECT_EQ(encoded_frame[1].SpatialIndex().value_or(-1), 2);
+ }
+
+ // Enable the first layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(encoded_frame.size(), 3u);
+ // SS data is available immediately after switching a layer on.
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.ss_data_available,
+ frame_num == 0);
+ // A key frame should be generated when enabling lower layers.
+ const VideoFrameType expected_type = frame_num == 0
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(encoded_frame[0]._frameType, expected_type);
+ }
+}
+
+TEST_F(TestVp9Impl, EndOfPicture) {
+ const size_t num_spatial_layers = 2;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Encode both the base and the upper layer. Check that the end-of-picture
+ // flag is set on the upper-layer frame but not on the base-layer frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+
+ std::vector<EncodedImage> frames;
+ std::vector<CodecSpecificInfo> codec_specific;
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+ EXPECT_FALSE(codec_specific[0].end_of_picture);
+ EXPECT_TRUE(codec_specific[1].end_of_picture);
+
+ // Encode only the base layer. Check that the end-of-picture flag is set on
+ // the base-layer frame.
+ bitrate_allocation.SetBitrate(1, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+ EXPECT_FALSE(frames[0].SpatialIndex());
+ EXPECT_TRUE(codec_specific[0].end_of_picture);
+}
+
+TEST_F(TestVp9Impl, InterLayerPred) {
+ const size_t num_spatial_layers = 2;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ bitrate_allocation.SetBitrate(
+ i, 0, codec_settings_.spatialLayers[i].targetBitrate * 1000);
+ }
+
+ const std::vector<InterLayerPredMode> inter_layer_pred_modes = {
+ InterLayerPredMode::kOff, InterLayerPredMode::kOn,
+ InterLayerPredMode::kOnKeyPic};
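+ // The modes differ in when an upper layer may predict from the layer below:
+ // never (kOff), on any frame (kOn), or only on key pictures (kOnKeyPic).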
+
+ for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
+ codec_settings_.VP9()->interLayerPred = inter_layer_pred;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+
+ std::vector<EncodedImage> frames;
+ std::vector<CodecSpecificInfo> codec_specific;
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+
+ // Key frame.
+ ASSERT_EQ(frames[0].SpatialIndex(), 0);
+ ASSERT_FALSE(codec_specific[0].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.inter_layer_predicted);
+ EXPECT_EQ(codec_specific[0].codecSpecific.VP9.non_ref_for_inter_layer_pred,
+ inter_layer_pred == InterLayerPredMode::kOff);
+ EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.ss_data_available);
+
+ ASSERT_EQ(frames[1].SpatialIndex(), 1);
+ ASSERT_FALSE(codec_specific[1].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_EQ(codec_specific[1].codecSpecific.VP9.inter_layer_predicted,
+ inter_layer_pred == InterLayerPredMode::kOn ||
+ inter_layer_pred == InterLayerPredMode::kOnKeyPic);
+ EXPECT_EQ(codec_specific[1].codecSpecific.VP9.ss_data_available,
+ inter_layer_pred == InterLayerPredMode::kOff);
+ EXPECT_TRUE(
+ codec_specific[1].codecSpecific.VP9.non_ref_for_inter_layer_pred);
+
+ // Delta frame.
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
+
+ ASSERT_EQ(frames[0].SpatialIndex(), 0);
+ ASSERT_TRUE(codec_specific[0].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.inter_layer_predicted);
+ EXPECT_EQ(codec_specific[0].codecSpecific.VP9.non_ref_for_inter_layer_pred,
+ inter_layer_pred != InterLayerPredMode::kOn);
+ EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.ss_data_available);
+
+ ASSERT_EQ(frames[1].SpatialIndex(), 1);
+ ASSERT_TRUE(codec_specific[1].codecSpecific.VP9.inter_pic_predicted);
+ EXPECT_EQ(codec_specific[1].codecSpecific.VP9.inter_layer_predicted,
+ inter_layer_pred == InterLayerPredMode::kOn);
+ EXPECT_TRUE(
+ codec_specific[1].codecSpecific.VP9.non_ref_for_inter_layer_pred);
+ EXPECT_FALSE(codec_specific[1].codecSpecific.VP9.ss_data_available);
+ }
+}
+
+TEST_F(TestVp9Impl,
+ EnablingUpperLayerTriggersKeyFrameIfInterLayerPredIsDisabled) {
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+
+ const std::vector<InterLayerPredMode> inter_layer_pred_modes = {
+ InterLayerPredMode::kOff, InterLayerPredMode::kOn,
+ InterLayerPredMode::kOnKeyPic};
+
+ for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
+ codec_settings_.VP9()->interLayerPred = inter_layer_pred;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx + 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+
+ const bool is_first_upper_layer_frame = (sl_idx > 0 && frame_num == 0);
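+ // With kOn, the newly enabled layer can predict from the already-encoded
+ // base layer, so no key frame is needed; with kOff or kOnKeyPic a key
+ // frame is required.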
+ if (is_first_upper_layer_frame) {
+ if (inter_layer_pred == InterLayerPredMode::kOn) {
+ EXPECT_EQ(encoded_frame[0]._frameType,
+ VideoFrameType::kVideoFrameDelta);
+ } else {
+ EXPECT_EQ(encoded_frame[0]._frameType,
+ VideoFrameType::kVideoFrameKey);
+ }
+ } else if (sl_idx == 0 && frame_num == 0) {
+ EXPECT_EQ(encoded_frame[0]._frameType,
+ VideoFrameType::kVideoFrameKey);
+ } else {
+ for (size_t i = 0; i <= sl_idx; ++i) {
+ EXPECT_EQ(encoded_frame[i]._frameType,
+ VideoFrameType::kVideoFrameDelta);
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_F(TestVp9Impl,
+ EnablingUpperLayerUnsetsInterPicPredictedInInterlayerPredModeOn) {
+ const size_t num_spatial_layers = 3;
+ const size_t num_frames_to_encode = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+
+ const std::vector<InterLayerPredMode> inter_layer_pred_modes = {
+ InterLayerPredMode::kOff, InterLayerPredMode::kOn,
+ InterLayerPredMode::kOnKeyPic};
+
+ for (const InterLayerPredMode inter_layer_pred : inter_layer_pred_modes) {
+ codec_settings_.VP9()->interLayerPred = inter_layer_pred;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(sl_idx + 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+
+ ASSERT_EQ(codec_specific_info.size(), sl_idx + 1);
+
+ for (size_t i = 0; i <= sl_idx; ++i) {
+ const bool is_keyframe =
+ encoded_frame[0]._frameType == VideoFrameType::kVideoFrameKey;
+ const bool is_first_upper_layer_frame =
+ (i == sl_idx && frame_num == 0);
+ // Inter-frame references are present, unless this is a keyframe or the
+ // first frame after activating an upper layer.
+ const bool expect_no_references =
+ is_keyframe || (is_first_upper_layer_frame &&
+ inter_layer_pred == InterLayerPredMode::kOn);
+ EXPECT_EQ(
+ codec_specific_info[i].codecSpecific.VP9.inter_pic_predicted,
+ !expect_no_references);
+ }
+ }
+ }
+ }
+}
+
+TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
+ const size_t num_spatial_layers = 2;
+ const size_t num_temporal_layers = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+
+ // Enable both spatial and both temporal layers.
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 0, 1, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+
+ // Encode 3 frames.
+ for (int i = 0; i < 3; ++i) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
+ }
+
+ // Disable SL1 layer.
+ bitrate_allocation.SetBitrate(1, 0, 0);
+ bitrate_allocation.SetBitrate(1, 1, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 1 frame.
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 1u);
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
+
+ // Enable SL1 layer.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 1 frame.
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
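+ // Re-enabled within the same GOF: SL1 still has a valid reference, so both
+ // layers keep inter-picture prediction.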
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
+ EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted, true);
+}
+
+TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
+ const size_t num_spatial_layers = 2;
+ const size_t num_temporal_layers = 2;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = false;
+
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+
+ // Enable both spatial and both temporal layers.
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 0, 1, codec_settings_.spatialLayers[0].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+
+ // Encode 3 frames.
+ for (int i = 0; i < 3; ++i) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
+ }
+
+ // Disable SL1 layer.
+ bitrate_allocation.SetBitrate(1, 0, 0);
+ bitrate_allocation.SetBitrate(1, 1, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 11 frames: more than the GOF length of 2, and an odd count so that
+ // the last frame lands on TL1.
+ for (int i = 0; i < 11; ++i) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 1u);
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1 - i % 2);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted,
+ true);
+ }
+
+ // Enable SL1 layer.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ bitrate_allocation.SetBitrate(
+ 1, 1, codec_settings_.spatialLayers[1].targetBitrate * 1000 / 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode 1 frame.
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ ASSERT_EQ(codec_specific_info.size(), 2u);
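+ // Re-enabled in a new GOF: SL1 has no usable reference left, so its first
+ // frame is not inter-picture predicted.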
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
+ EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted,
+ false);
+}
+
+TEST_F(TestVp9Impl, EnablingNewLayerInScreenshareForcesAllLayersWithSS) {
+ const size_t num_spatial_layers = 3;
+ // Chosen by hand: with the configured per-layer max framerates, the 2nd
+ // frame is dropped.
+ const size_t num_frames_to_encode_before_drop = 1;
+
+ codec_settings_.maxFramerate = 30;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.spatialLayers[0].maxFramerate = 5.0;
+ // Use 30 fps for SL1 instead of 10, so that even if an SL0 frame is dropped
+ // due to framerate capping we still get back at least the middle layer. This
+ // simplifies the test.
+ codec_settings_.spatialLayers[1].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[2].maxFramerate = 30.0;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Enable all but the last layer.
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode enough frames to force drop due to framerate capping.
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode_before_drop;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ }
+
+ // Enable the last layer.
+ bitrate_allocation.SetBitrate(
+ num_spatial_layers - 1, 0,
+ codec_settings_.spatialLayers[num_spatial_layers - 1].targetBitrate *
+ 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // All layers are encoded, even though frame dropping would otherwise happen.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ // Now all 3 layers should be encoded.
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(encoded_frames.size(), 3u);
+ // Scalability structure has to be triggered.
+ EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+}
+
+TEST_F(TestVp9Impl, ScreenshareFrameDropping) {
+ const int num_spatial_layers = 3;
+ const int num_frames_to_detect_drops = 2;
+
+ codec_settings_.maxFramerate = 30;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ // Use 30 fps for SL0 and SL1 because it simplifies the test.
+ codec_settings_.spatialLayers[0].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[1].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[2].maxFramerate = 30.0;
+ codec_settings_.SetFrameDropEnabled(true);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Enable all but the last layer.
+ VideoBitrateAllocation bitrate_allocation;
+ // Very low bitrate for the lowest spatial layer to ensure rate-control drops.
+ bitrate_allocation.SetBitrate(0, 0, 1000);
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[1].targetBitrate * 1000);
+ // Disable highest layer.
+ bitrate_allocation.SetBitrate(2, 0, 0);
+
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ bool frame_dropped = false;
+ // Encode enough frames to force drop due to rate-control.
+ for (size_t frame_num = 0; frame_num < num_frames_to_detect_drops;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_LE(encoded_frames.size(), 2u);
+ EXPECT_GE(encoded_frames.size(), 1u);
+ if (encoded_frames.size() == 1) {
+ frame_dropped = true;
+ // The dropped frame is the SL0 frame.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex(), 1);
+ }
+ }
+ EXPECT_TRUE(frame_dropped);
+
+ // Enable the last layer.
+ bitrate_allocation.SetBitrate(
+ 2, 0, codec_settings_.spatialLayers[2].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // No drop allowed.
+ EXPECT_EQ(encoded_frames.size(), 3u);
+
+ // Verify that frame-dropping is re-enabled.
+ frame_dropped = false;
+ // Encode enough frames to force drop due to rate-control.
+ for (size_t frame_num = 0; frame_num < num_frames_to_detect_drops;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_LE(encoded_frames.size(), 3u);
+ EXPECT_GE(encoded_frames.size(), 2u);
+ if (encoded_frames.size() == 2) {
+ frame_dropped = true;
+ // The dropped frame is the SL0 frame.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex(), 1);
+ EXPECT_EQ(encoded_frames[1].SpatialIndex(), 2);
+ }
+ }
+ EXPECT_TRUE(frame_dropped);
+}
+
+TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
+ const size_t num_spatial_layers = 3;
+ // Chosen by hand: with the configured per-layer max framerates, the 2nd
+ // frame is dropped.
+ const size_t num_frames_to_encode_before_drop = 1;
+ // Chosen by hand: exactly 5 frames are dropped for an input fps of 30 and a
+ // max framerate of 5.
+ const size_t num_dropped_frames = 5;
+
+ codec_settings_.maxFramerate = 30;
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+ codec_settings_.spatialLayers[0].maxFramerate = 5.0;
+ // Use 30 fps for SL1 instead of 5, so that even if an SL0 frame is dropped
+ // due to framerate capping we still get back at least the middle layer. This
+ // simplifies the test.
+ codec_settings_.spatialLayers[1].maxFramerate = 30.0;
+ codec_settings_.spatialLayers[2].maxFramerate = 30.0;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kScreensharing;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // All layers are enabled from the start.
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // Encode enough frames to force drop due to framerate capping.
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode_before_drop;
+ ++frame_num) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ }
+
+ // Now the first layer should produce no frames.
+ for (size_t frame_num = 0; frame_num < num_dropped_frames - 2; ++frame_num) {
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ // The first layer is dropped due to the frame rate cap; the two upper
+ // layers are still encoded.
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // First layer is skipped.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex().value_or(-1), 1);
+ }
+
+ // Disable the last layer.
+ bitrate_allocation.SetBitrate(num_spatial_layers - 1, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // The first layer is still expected to be dropped; the last layer must now
+ // be disabled as well.
+ for (size_t frame_num = num_dropped_frames - 2;
+ frame_num < num_dropped_frames; ++frame_num) {
+ // Expect back one frame.
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ // The first layer is dropped due to the frame rate cap, and the last layer
+ // is now disabled, so only the middle layer comes back.
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // First layer is skipped.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex().value_or(-1), 1);
+ // No SS data on non-base spatial layer.
+ EXPECT_FALSE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+ }
+
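+ // After num_dropped_frames input frames the 5 fps cap on SL0 admits a new
+ // frame, so the base layer reappears together with the SS info.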
+ SetWaitForEncodedFramesThreshold(2);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // First layer is not skipped now.
+ EXPECT_EQ(encoded_frames[0].SpatialIndex().value_or(-1), 0);
+ // SS data should be present.
+ EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+}
+
+TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) {
+ const size_t num_spatial_layers = 3;
+ const size_t num_temporal_layers = 2;
+ ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.mode = VideoCodecMode::kRealtimeVideo;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic;
+ codec_settings_.VP9()->flexibleMode = false;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // Enable all the layers.
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, tl_idx,
+ codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000 /
+ num_temporal_layers);
+ }
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+
+ // Encode one TL0 frame.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0u);
+
+ // Disable the last layer.
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) {
+ bitrate_allocation.SetBitrate(num_spatial_layers - 1, tl_idx, 0);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ // The next frame is a TL1 frame. The last layer is disabled immediately,
+ // but the SS structure is not provided yet.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1u);
+ EXPECT_FALSE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+
+ // The next frame is a TL0 frame, which should carry the delayed SS structure.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0u);
+ EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
+ EXPECT_TRUE(codec_specific_info[0]
+ .codecSpecific.VP9.spatial_layer_resolution_present);
+ EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.num_spatial_layers,
+ num_spatial_layers - 1);
+}
+
+TEST_F(TestVp9Impl,
+ LowLayerMarkedAsRefIfHighLayerNotEncodedAndInterLayerPredIsEnabled) {
+ ConfigureSvc(codec_settings_, 3);
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(
+ 0, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_info));
+ EXPECT_TRUE(codec_info.codecSpecific.VP9.ss_data_available);
+ EXPECT_FALSE(codec_info.codecSpecific.VP9.non_ref_for_inter_layer_pred);
+}
+
+TEST_F(TestVp9Impl, ScalabilityStructureIsAvailableInFlexibleMode) {
+ codec_settings_.VP9()->flexibleMode = true;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ EXPECT_TRUE(codec_specific_info.codecSpecific.VP9.ss_data_available);
+}
+
+TEST_F(TestVp9Impl, Profile0PreferredPixelFormats) {
+ EXPECT_THAT(encoder_->GetEncoderInfo().preferred_pixel_formats,
+ testing::UnorderedElementsAre(VideoFrameBuffer::Type::kNV12,
+ VideoFrameBuffer::Type::kI420));
+}
+
+TEST_F(TestVp9Impl, EncoderInfoWithoutResolutionBitrateLimits) {
+ EXPECT_TRUE(encoder_->GetEncoderInfo().resolution_bitrate_limits.empty());
+}
+
+TEST_F(TestVp9Impl, EncoderInfoWithBitrateLimitsFromFieldTrial) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VP9-GetEncoderInfoOverride/"
+ "frame_size_pixels:123|456|789,"
+ "min_start_bitrate_bps:11000|22000|33000,"
+ "min_bitrate_bps:44000|55000|66000,"
+ "max_bitrate_bps:77000|88000|99000/");
+ SetUp();
+
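+ // Each '|'-separated column in the trial defines one ResolutionBitrateLimits
+ // entry; they are expected below in the same order.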
+ EXPECT_THAT(
+ encoder_->GetEncoderInfo().resolution_bitrate_limits,
+ ::testing::ElementsAre(
+ VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000},
+ VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000},
+ VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000}));
+}
+
+TEST_F(TestVp9Impl, EncoderInfoFpsAllocation) {
+ const uint8_t kNumSpatialLayers = 3;
+ const uint8_t kNumTemporalLayers = 3;
+
+ codec_settings_.maxFramerate = 30;
+ codec_settings_.VP9()->numberOfSpatialLayers = kNumSpatialLayers;
+ codec_settings_.VP9()->numberOfTemporalLayers = kNumTemporalLayers;
+
+ for (uint8_t sl_idx = 0; sl_idx < kNumSpatialLayers; ++sl_idx) {
+ codec_settings_.spatialLayers[sl_idx].width = codec_settings_.width;
+ codec_settings_.spatialLayers[sl_idx].height = codec_settings_.height;
+ codec_settings_.spatialLayers[sl_idx].minBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].maxBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].targetBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].active = true;
+ codec_settings_.spatialLayers[sl_idx].maxFramerate =
+ codec_settings_.maxFramerate;
+ }
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
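+ // With three temporal layers, each spatial layer advertises cumulative
+ // framerate fractions of 1/4, 1/2 and 1 of the max framerate.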
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction);
+ expected_fps_allocation[1] = expected_fps_allocation[0];
+ expected_fps_allocation[2] = expected_fps_allocation[0];
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestVp9Impl, EncoderInfoFpsAllocationFlexibleMode) {
+ const uint8_t kNumSpatialLayers = 3;
+
+ codec_settings_.maxFramerate = 30;
+ codec_settings_.VP9()->numberOfSpatialLayers = kNumSpatialLayers;
+ codec_settings_.VP9()->numberOfTemporalLayers = 1;
+ codec_settings_.VP9()->flexibleMode = true;
+
+ VideoEncoder::RateControlParameters rate_params;
+ for (uint8_t sl_idx = 0; sl_idx < kNumSpatialLayers; ++sl_idx) {
+ codec_settings_.spatialLayers[sl_idx].width = codec_settings_.width;
+ codec_settings_.spatialLayers[sl_idx].height = codec_settings_.height;
+ codec_settings_.spatialLayers[sl_idx].minBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].maxBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].targetBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].active = true;
+ // Force different frame rates for different layers, to verify that total
+ // fraction is correct.
+ codec_settings_.spatialLayers[sl_idx].maxFramerate =
+ codec_settings_.maxFramerate / (kNumSpatialLayers - sl_idx);
+ rate_params.bitrate.SetBitrate(sl_idx, 0,
+ codec_settings_.startBitrate * 1000);
+ }
+ rate_params.bandwidth_allocation =
+ DataRate::BitsPerSec(rate_params.bitrate.get_sum_bps());
+ rate_params.framerate_fps = codec_settings_.maxFramerate;
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ // No temporal layers allowed when spatial layers have different fps targets.
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[0].push_back(EncoderInfo::kMaxFramerateFraction / 3);
+ expected_fps_allocation[1].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[2].push_back(EncoderInfo::kMaxFramerateFraction);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+
+ // SetRates with the current fps does not alter the outcome.
+ encoder_->SetRates(rate_params);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+
+ // A higher fps than the codec wants should still not affect the outcome.
+ rate_params.framerate_fps *= 2;
+ encoder_->SetRates(rate_params);
+ EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+class Vp9ImplWithLayeringTest
+ : public ::testing::TestWithParam<std::tuple<int, int, bool>> {
+ protected:
+ Vp9ImplWithLayeringTest()
+ : num_spatial_layers_(std::get<0>(GetParam())),
+ num_temporal_layers_(std::get<1>(GetParam())),
+ override_field_trials_(std::get<2>(GetParam())
+ ? "WebRTC-Vp9ExternalRefCtrl/Enabled/"
+ : "") {}
+
+ const uint8_t num_spatial_layers_;
+ const uint8_t num_temporal_layers_;
+ const test::ScopedFieldTrials override_field_trials_;
+};
+
+TEST_P(Vp9ImplWithLayeringTest, FlexibleMode) {
+ // In flexible mode the encoder wrapper obtains the actual list of references
+ // from the encoder and writes it into the RTP payload descriptor. Check that
+ // the reference list in the payload descriptor matches the predefined one
+ // used in non-flexible mode.
+ std::unique_ptr<VideoEncoder> encoder = VP9Encoder::Create();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.VP9()->flexibleMode = true;
+ codec_settings.SetFrameDropEnabled(false);
+ codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers_;
+ codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers_;
+ EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings),
+ WEBRTC_VIDEO_CODEC_OK);
+
+ GofInfoVP9 gof;
+ if (num_temporal_layers_ == 1) {
+ gof.SetGofInfoVP9(kTemporalStructureMode1);
+ } else if (num_temporal_layers_ == 2) {
+ gof.SetGofInfoVP9(kTemporalStructureMode2);
+ } else if (num_temporal_layers_ == 3) {
+ gof.SetGofInfoVP9(kTemporalStructureMode3);
+ }
+
+ // Encode at least (num_frames_in_gof + 1) frames to verify references
+ // of non-key frame with gof_idx = 0.
+ int num_input_frames = gof.num_frames_in_gof + 1;
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
+ EncodedVideoFrameProducer(*encoder)
+ .SetNumInputFrames(num_input_frames)
+ .SetResolution({kWidth, kHeight})
+ .Encode();
+ ASSERT_THAT(frames, SizeIs(num_input_frames * num_spatial_layers_));
+
+ for (size_t i = 0; i < frames.size(); ++i) {
+ const EncodedVideoFrameProducer::EncodedFrame& frame = frames[i];
+ const size_t picture_idx = i / num_spatial_layers_;
+ const size_t gof_idx = picture_idx % gof.num_frames_in_gof;
+
+ const CodecSpecificInfoVP9& vp9 =
+ frame.codec_specific_info.codecSpecific.VP9;
+ EXPECT_EQ(frame.encoded_image.SpatialIndex(),
+ num_spatial_layers_ == 1
+ ? absl::nullopt
+ : absl::optional<int>(i % num_spatial_layers_))
+ << "Frame " << i;
+ EXPECT_EQ(vp9.temporal_idx, num_temporal_layers_ == 1
+ ? kNoTemporalIdx
+ : gof.temporal_idx[gof_idx])
+ << "Frame " << i;
+ EXPECT_EQ(vp9.temporal_up_switch, gof.temporal_up_switch[gof_idx])
+ << "Frame " << i;
+ if (picture_idx == 0) {
+ EXPECT_EQ(vp9.num_ref_pics, 0) << "Frame " << i;
+ } else {
+ EXPECT_THAT(rtc::MakeArrayView(vp9.p_diff, vp9.num_ref_pics),
+ UnorderedElementsAreArray(gof.pid_diff[gof_idx],
+ gof.num_ref_pics[gof_idx]))
+ << "Frame " << i;
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(All,
+ Vp9ImplWithLayeringTest,
+ ::testing::Combine(::testing::Values(1, 2, 3),
+ ::testing::Values(1, 2, 3),
+ ::testing::Bool()));
+
+class TestVp9ImplFrameDropping : public TestVp9Impl {
+ protected:
+ void ModifyCodecSettings(VideoCodec* codec_settings) override {
+ webrtc::test::CodecSettings(kVideoCodecVP9, codec_settings);
+ // We need to encode quite a lot of frames in this test. Use low resolution
+ // to reduce execution time.
+ codec_settings->width = 64;
+ codec_settings->height = 64;
+ codec_settings->mode = VideoCodecMode::kScreensharing;
+ }
+};
+
+TEST_F(TestVp9ImplFrameDropping, PreEncodeFrameDropping) {
+ const size_t num_frames_to_encode = 100;
+ const float input_framerate_fps = 30.0;
+ const float video_duration_secs = num_frames_to_encode / input_framerate_fps;
+ const float expected_framerate_fps = 5.0f;
+ const float max_abs_framerate_error_fps = expected_framerate_fps * 0.1f;
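+ // Input runs at 30 fps while the encoder is capped at 5 fps, so roughly
+ // five out of every six input frames should be dropped before encoding.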
+
+ codec_settings_.maxFramerate = static_cast<uint32_t>(expected_framerate_fps);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoFrame input_frame = NextInputFrame();
+ for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ const size_t timestamp = input_frame.timestamp() +
+ kVideoPayloadTypeFrequency / input_framerate_fps;
+ input_frame.set_timestamp(static_cast<uint32_t>(timestamp));
+ }
+
+ const size_t num_encoded_frames = GetNumEncodedFrames();
+ const float encoded_framerate_fps = num_encoded_frames / video_duration_secs;
+ EXPECT_NEAR(encoded_framerate_fps, expected_framerate_fps,
+ max_abs_framerate_error_fps);
+}
+
+TEST_F(TestVp9ImplFrameDropping, DifferentFrameratePerSpatialLayer) {
+ // Assign a different frame rate to each spatial layer and check that the
+ // resulting frame rates are close to the assigned ones.
+ const uint8_t num_spatial_layers = 3;
+ const float input_framerate_fps = 30.0;
+ const size_t video_duration_secs = 3;
+ const size_t num_input_frames = video_duration_secs * input_framerate_fps;
+
+ codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->flexibleMode = true;
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (uint8_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ // Frame rate increases from low to high layer.
+ const uint32_t framerate_fps = 10 * (sl_idx + 1);
+
+ codec_settings_.spatialLayers[sl_idx].width = codec_settings_.width;
+ codec_settings_.spatialLayers[sl_idx].height = codec_settings_.height;
+ codec_settings_.spatialLayers[sl_idx].maxFramerate = framerate_fps;
+ codec_settings_.spatialLayers[sl_idx].minBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].maxBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].targetBitrate =
+ codec_settings_.startBitrate;
+ codec_settings_.spatialLayers[sl_idx].active = true;
+
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ VideoFrame input_frame = NextInputFrame();
+ for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ const size_t timestamp = input_frame.timestamp() +
+ kVideoPayloadTypeFrequency / input_framerate_fps;
+ input_frame.set_timestamp(static_cast<uint32_t>(timestamp));
+ }
+
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_infos;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_infos));
+
+ std::vector<size_t> num_encoded_frames(num_spatial_layers, 0);
+ for (EncodedImage& encoded_frame : encoded_frames) {
+ ++num_encoded_frames[encoded_frame.SpatialIndex().value_or(0)];
+ }
+
+ for (uint8_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ const float layer_target_framerate_fps =
+ codec_settings_.spatialLayers[sl_idx].maxFramerate;
+ const float layer_output_framerate_fps =
+ static_cast<float>(num_encoded_frames[sl_idx]) / video_duration_secs;
+ const float max_framerate_error_fps = layer_target_framerate_fps * 0.1f;
+ EXPECT_NEAR(layer_output_framerate_fps, layer_target_framerate_fps,
+ max_framerate_error_fps);
+ }
+}
+
+class TestVp9ImplProfile2 : public TestVp9Impl {
+ protected:
+ void SetUp() override {
+ // Profile 2 might not be available on some platforms until
+ // https://bugs.chromium.org/p/webm/issues/detail?id=1544 is solved.
+ bool profile_2_is_supported = false;
+ for (const auto& codec : SupportedVP9Codecs()) {
+ if (ParseSdpForVP9Profile(codec.parameters)
+ .value_or(VP9Profile::kProfile0) == VP9Profile::kProfile2) {
+ profile_2_is_supported = true;
+ }
+ }
+ if (!profile_2_is_supported)
+ return;
+
+ TestVp9Impl::SetUp();
+ input_frame_generator_ = test::CreateSquareFrameGenerator(
+ codec_settings_.width, codec_settings_.height,
+ test::FrameGeneratorInterface::OutputType::kI010,
+ absl::optional<int>());
+ }
+
+ std::unique_ptr<VideoEncoder> CreateEncoder() override {
+ cricket::VideoCodec profile2_codec;
+ profile2_codec.SetParam(kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile2));
+ return VP9Encoder::Create(profile2_codec);
+ }
+
+ std::unique_ptr<VideoDecoder> CreateDecoder() override {
+ return VP9Decoder::Create();
+ }
+};
+
+TEST_F(TestVp9ImplProfile2, EncodeDecode) {
+ if (!encoder_)
+ return;
+
+ VideoFrame input_frame = NextInputFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+ // The first frame is a key frame; mark it as such before decoding.
+ encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+ std::unique_ptr<VideoFrame> decoded_frame;
+ absl::optional<uint8_t> decoded_qp;
+ ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+ ASSERT_TRUE(decoded_frame);
+
+ // TODO(emircan): Add PSNR for different color depths.
+ EXPECT_GT(I420PSNR(*input_frame.video_frame_buffer()->ToI420(),
+ *decoded_frame->video_frame_buffer()->ToI420()),
+ 31);
+}
+
+TEST_F(TestVp9Impl, EncodeWithDynamicRate) {
+ // Configure the dynamic rate field trial and re-create the encoder.
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-VideoRateControl/vp9_dynamic_rate:true/");
+ SetUp();
+
+ // Set 300kbps target with 100% headroom.
+ VideoEncoder::RateControlParameters params;
+ params.bandwidth_allocation = DataRate::BitsPerSec(300000);
+ params.bitrate.SetBitrate(0, 0, params.bandwidth_allocation.bps());
+ params.framerate_fps = 30.0;
+
+ encoder_->SetRates(params);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ EncodedImage encoded_frame;
+ CodecSpecificInfo codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+
+ // Set no headroom and encode again.
+ params.bandwidth_allocation = DataRate::Zero();
+ encoder_->SetRates(params);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+}
+
+TEST_F(TestVp9Impl, ReenablingUpperLayerAfterKFWithInterlayerPredIsEnabled) {
+ const size_t num_spatial_layers = 2;
+ const int num_frames_to_encode = 10;
+ codec_settings_.VP9()->flexibleMode = true;
+ codec_settings_.SetFrameDropEnabled(false);
+ codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings_.VP9()->numberOfTemporalLayers = 1;
+ codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn;
+ // Force a low framerate, so all layers are present for all frames.
+ codec_settings_.maxFramerate = 5;
+
+ ConfigureSvc(codec_settings_, num_spatial_layers);
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->InitEncode(&codec_settings_, kSettings));
+
+ VideoBitrateAllocation bitrate_allocation;
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ bitrate_allocation.SetBitrate(
+ sl_idx, 0, codec_settings_.spatialLayers[sl_idx].targetBitrate * 1000);
+ }
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific;
+
+ for (int i = 0; i < num_frames_to_encode; ++i) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers);
+ }
+
+ // Disable the last layer.
+ bitrate_allocation.SetBitrate(num_spatial_layers - 1, 0, 0);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ for (int i = 0; i < num_frames_to_encode; ++i) {
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers - 1);
+ }
+
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+
+ // Force a key-frame with the last layer still disabled.
+ SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers - 1);
+ ASSERT_EQ(encoded_frames[0]._frameType, VideoFrameType::kVideoFrameKey);
+
+ // Re-enable the last layer.
+ bitrate_allocation.SetBitrate(
+ num_spatial_layers - 1, 0,
+ codec_settings_.spatialLayers[num_spatial_layers - 1].targetBitrate *
+ 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+
+ SetWaitForEncodedFramesThreshold(num_spatial_layers);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific));
+ EXPECT_EQ(encoded_frames.size(), num_spatial_layers);
+ EXPECT_EQ(encoded_frames[0]._frameType, VideoFrameType::kVideoFrameDelta);
+}
+
+TEST_F(TestVp9Impl, HandlesEmptyDecoderConfigure) {
+ std::unique_ptr<VideoDecoder> decoder = CreateDecoder();
+ // Check that the default settings are OK for the decoder.
+ EXPECT_TRUE(decoder->Configure({}));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder->Release());
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ TestVp9ImplForPixelFormat,
+ TestVp9ImplForPixelFormat,
+ ::testing::Values(test::FrameGeneratorInterface::OutputType::kI420,
+ test::FrameGeneratorInterface::OutputType::kNV12),
+ [](const auto& info) {
+ return test::FrameGeneratorInterface::OutputTypeToString(info.param);
+ });
+
+// Helper function to populate a vpx_image_t instance with dimensions and,
+// optionally, image data.
+std::function<vpx_image_t*(vpx_image_t*,
+ vpx_img_fmt_t,
+ unsigned int,
+ unsigned int,
+ unsigned int,
+ unsigned char* img_data)>
+GetWrapImageFunction(vpx_image_t* img) {
+ return [img](vpx_image_t* /*img*/, vpx_img_fmt_t fmt, unsigned int d_w,
+ unsigned int d_h, unsigned int /*stride_align*/,
+ unsigned char* img_data) {
+ img->fmt = fmt;
+ img->d_w = d_w;
+ img->d_h = d_h;
+ img->img_data = img_data;
+ return img;
+ };
+}
+
+TEST(Vp9SpeedSettingsTrialsTest, NoSvcUsesGlobalSpeedFromTl0InLayerConfig) {
+ // TL0 speed 8 at >= 480x270 (129600 pixels), 4 below that.
+ test::ExplicitKeyValueConfig trials(
+ "WebRTC-VP9-PerformanceFlags/"
+ "use_per_layer_speed,"
+ "min_pixel_count:0|129600,"
+ "base_layer_speed:4|8,"
+ "high_layer_speed:5|9,"
+ "deblock_mode:1|0/");
+
+ // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise
+ // passed on to LibvpxVp9Encoder.
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ absl::WrapUnique<LibvpxInterface>(vpx), trials);
+
+ VideoCodec settings = DefaultCodecSettings();
+ settings.width = 480;
+ settings.height = 270;
+ vpx_image_t img;
+
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+ ON_CALL(*vpx, codec_enc_config_default)
+ .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) {
+ memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t));
+ }),
+ Return(VPX_CODEC_OK)));
+ EXPECT_CALL(*vpx, codec_control(_, _, An<int>())).Times(AnyNumber());
+
+ EXPECT_CALL(*vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS,
+ A<vpx_svc_extra_cfg_t*>()))
+ .Times(0);
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(8)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+
+ encoder.Release();
+ settings.width = 352;
+ settings.height = 216;
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(4)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+}
+
+TEST(Vp9SpeedSettingsTrialsTest,
+ NoPerLayerFlagUsesGlobalSpeedFromTopLayerInConfig) {
+  // TL0 speed 8 at >= 480x270, 4 if below that.
+ test::ExplicitKeyValueConfig trials(
+ "WebRTC-VP9-PerformanceFlags/"
+ "min_pixel_count:0|129600,"
+ "base_layer_speed:4|8,"
+ "high_layer_speed:5|9,"
+ "deblock_mode:1|0/");
+
+ // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise
+ // passed on to LibvpxVp9Encoder.
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ absl::WrapUnique<LibvpxInterface>(vpx), trials);
+
+ VideoCodec settings = DefaultCodecSettings();
+ settings.width = 480;
+ settings.height = 270;
+ ConfigureSvc(settings, 2, 3);
+ vpx_image_t img;
+
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+ ON_CALL(*vpx, codec_enc_config_default)
+ .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) {
+ memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t));
+ }),
+ Return(VPX_CODEC_OK)));
+ EXPECT_CALL(*vpx, codec_control(_, _, An<int>())).Times(AnyNumber());
+
+ // Speed settings not populated when 'use_per_layer_speed' flag is absent.
+ EXPECT_CALL(*vpx,
+ codec_control(
+ _, VP9E_SET_SVC_PARAMETERS,
+ SafeMatcherCast<vpx_svc_extra_cfg_t*>(AllOf(
+ Field(&vpx_svc_extra_cfg_t::speed_per_layer, Each(0)),
+ Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl, Each(0))))))
+ .Times(2);
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(8)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+
+ encoder.Release();
+ settings.width = 476;
+ settings.height = 268;
+ settings.spatialLayers[0].width = settings.width / 2;
+ settings.spatialLayers[0].height = settings.height / 2;
+ settings.spatialLayers[1].width = settings.width;
+ settings.spatialLayers[1].height = settings.height;
+
+ EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq<int>(4)));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+}
+
+TEST(Vp9SpeedSettingsTrialsTest, DefaultPerLayerFlagsWithSvc) {
+ // Per-temporal and spatial layer speed settings:
+ // SL0: TL0 = speed 5, TL1/TL2 = speed 8.
+ // SL1/2: TL0 = speed 7, TL1/TL2 = speed 8.
+ // Deblocking-mode per spatial layer:
+ // SL0: mode 1, SL1/2: mode 0.
+ test::ExplicitKeyValueConfig trials(
+ "WebRTC-VP9-PerformanceFlags/"
+ "use_per_layer_speed,"
+ "min_pixel_count:0|129600,"
+ "base_layer_speed:5|7,"
+ "high_layer_speed:8|8,"
+ "deblock_mode:1|0/");
+
+ // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise
+ // passed on to LibvpxVp9Encoder.
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ absl::WrapUnique<LibvpxInterface>(vpx), trials);
+
+ VideoCodec settings = DefaultCodecSettings();
+ constexpr int kNumSpatialLayers = 3;
+ constexpr int kNumTemporalLayers = 3;
+ ConfigureSvc(settings, kNumSpatialLayers, kNumTemporalLayers);
+ VideoBitrateAllocation bitrate_allocation;
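+  // Split each spatial layer's target bitrate evenly across its temporal
+  // layers.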
+ for (int si = 0; si < kNumSpatialLayers; ++si) {
+ for (int ti = 0; ti < kNumTemporalLayers; ++ti) {
+ uint32_t bitrate_bps =
+ settings.spatialLayers[si].targetBitrate * 1'000 / kNumTemporalLayers;
+ bitrate_allocation.SetBitrate(si, ti, bitrate_bps);
+ }
+ }
+ vpx_image_t img;
+
+ // Speed settings per spatial layer, for TL0.
+ const int kBaseTlSpeed[VPX_MAX_LAYERS] = {5, 7, 7};
+ // Speed settings per spatial layer, for TL1, TL2.
+ const int kHighTlSpeed[VPX_MAX_LAYERS] = {8, 8, 8};
+ // Loopfilter settings are handled within libvpx, so this array is valid for
+ // both TL0 and higher.
+ const int kLoopFilter[VPX_MAX_LAYERS] = {1, 0, 0};
+
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+ ON_CALL(*vpx, codec_enc_init)
+ .WillByDefault(WithArg<0>([](vpx_codec_ctx_t* ctx) {
+ memset(ctx, 0, sizeof(*ctx));
+ return VPX_CODEC_OK;
+ }));
+ ON_CALL(*vpx, codec_enc_config_default)
+ .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) {
+ memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t));
+ }),
+ Return(VPX_CODEC_OK)));
+ EXPECT_CALL(
+ *vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS,
+ SafeMatcherCast<vpx_svc_extra_cfg_t*>(
+ AllOf(Field(&vpx_svc_extra_cfg_t::speed_per_layer,
+ ElementsAreArray(kBaseTlSpeed)),
+ Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl,
+ ElementsAreArray(kLoopFilter))))));
+
+ // Capture the callback into the vp9 wrapper.
+ vpx_codec_priv_output_cx_pkt_cb_pair_t callback_pointer = {};
+ EXPECT_CALL(*vpx, codec_control(_, VP9E_REGISTER_CX_CALLBACK, A<void*>()))
+ .WillOnce(WithArg<2>([&](void* cbp) {
+ callback_pointer =
+ *reinterpret_cast<vpx_codec_priv_output_cx_pkt_cb_pair_t*>(cbp);
+ return VPX_CODEC_OK;
+ }));
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+
+ encoder.SetRates(VideoEncoder::RateControlParameters(bitrate_allocation,
+ settings.maxFramerate));
+
+ MockEncodedImageCallback callback;
+ encoder.RegisterEncodeCompleteCallback(&callback);
+ auto frame_generator = test::CreateSquareFrameGenerator(
+ kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420, 10);
+ Mock::VerifyAndClearExpectations(vpx);
+
+ uint8_t data[1] = {0};
+ vpx_codec_cx_pkt encoded_data = {};
+ encoded_data.data.frame.buf = &data;
+ encoded_data.data.frame.sz = 1;
+
+ const auto kImageOk =
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
+
+ int spatial_id = 0;
+ int temporal_id = 0;
+ EXPECT_CALL(*vpx,
+ codec_control(_, VP9E_SET_SVC_LAYER_ID, A<vpx_svc_layer_id_t*>()))
+ .Times(AnyNumber());
+ EXPECT_CALL(*vpx,
+ codec_control(_, VP9E_GET_SVC_LAYER_ID, A<vpx_svc_layer_id_t*>()))
+ .WillRepeatedly(WithArg<2>([&](vpx_svc_layer_id_t* layer_id) {
+ layer_id->spatial_layer_id = spatial_id;
+ layer_id->temporal_layer_id = temporal_id;
+ return VPX_CODEC_OK;
+ }));
+ vpx_svc_ref_frame_config_t stored_refs = {};
+ ON_CALL(*vpx, codec_control(_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+ A<vpx_svc_ref_frame_config_t*>()))
+ .WillByDefault(
+ DoAll(SaveArgPointee<2>(&stored_refs), Return(VPX_CODEC_OK)));
+ ON_CALL(*vpx, codec_control(_, VP9E_GET_SVC_REF_FRAME_CONFIG,
+ A<vpx_svc_ref_frame_config_t*>()))
+ .WillByDefault(
+ DoAll(SetArgPointee<2>(ByRef(stored_refs)), Return(VPX_CODEC_OK)));
+
+ // First frame is keyframe.
+ encoded_data.data.frame.flags = VPX_FRAME_IS_KEY;
+
+ // Default 3-layer temporal pattern: 0-2-1-2, then repeat and do two more.
+ for (int ti : {0, 2, 1, 2, 0, 2}) {
+ EXPECT_CALL(*vpx, codec_encode).WillOnce(Return(VPX_CODEC_OK));
+    // No update expected if the flags haven't changed, and they only change
+    // when we move between the base temporal layer and a non-base temporal
+    // layer.
+ if ((ti > 0) != (temporal_id > 0)) {
+ EXPECT_CALL(*vpx, codec_control(
+ _, VP9E_SET_SVC_PARAMETERS,
+ SafeMatcherCast<vpx_svc_extra_cfg_t*>(AllOf(
+ Field(&vpx_svc_extra_cfg_t::speed_per_layer,
+ ElementsAreArray(ti == 0 ? kBaseTlSpeed
+ : kHighTlSpeed)),
+ Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl,
+ ElementsAreArray(kLoopFilter))))));
+ } else {
+ EXPECT_CALL(*vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS,
+ A<vpx_svc_extra_cfg_t*>()))
+ .Times(0);
+ }
+
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(frame_generator->NextFrame().buffer)
+ .build();
+ encoder.Encode(frame, nullptr);
+
+ temporal_id = ti;
+ for (int si = 0; si < kNumSpatialLayers; ++si) {
+ spatial_id = si;
+
+ EXPECT_CALL(callback, OnEncodedImage).WillOnce(Return(kImageOk));
+ callback_pointer.output_cx_pkt(&encoded_data, callback_pointer.user_priv);
+ }
+
+ encoded_data.data.frame.flags = 0; // Following frames are delta frames.
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc
new file mode 100644
index 0000000000..222e57b6ba
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "rtc_base/checks.h"
+#include "vpx/vp8cx.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_codec.h"
+
+namespace webrtc {
+
+std::vector<SdpVideoFormat> SupportedVP9Codecs(bool add_scalability_modes) {
+#ifdef RTC_ENABLE_VP9
+ // Profile 2 might not be available on some platforms until
+ // https://bugs.chromium.org/p/webm/issues/detail?id=1544 is solved.
+ static bool vpx_supports_high_bit_depth =
+ (vpx_codec_get_caps(vpx_codec_vp9_cx()) & VPX_CODEC_CAP_HIGHBITDEPTH) !=
+ 0 &&
+ (vpx_codec_get_caps(vpx_codec_vp9_dx()) & VPX_CODEC_CAP_HIGHBITDEPTH) !=
+ 0;
+
+ absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
+ if (add_scalability_modes) {
+ for (const auto scalability_mode : kAllScalabilityModes) {
+ if (ScalabilityStructureConfig(scalability_mode).has_value()) {
+ scalability_modes.push_back(scalability_mode);
+ }
+ }
+ }
+ std::vector<SdpVideoFormat> supported_formats{SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}},
+ scalability_modes)};
+ if (vpx_supports_high_bit_depth) {
+ supported_formats.push_back(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile2)}},
+ scalability_modes));
+ }
+
+ return supported_formats;
+#else
+ return std::vector<SdpVideoFormat>();
+#endif
+}
+
+std::vector<SdpVideoFormat> SupportedVP9DecoderCodecs() {
+#ifdef RTC_ENABLE_VP9
+ std::vector<SdpVideoFormat> supported_formats = SupportedVP9Codecs();
+ // The WebRTC internal decoder supports VP9 profile 1 and 3. However, there's
+ // currently no way of sending VP9 profile 1 or 3 using the internal encoder.
+ // It would require extended support for I444, I422, and I440 buffers.
+ supported_formats.push_back(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile1)}}));
+ supported_formats.push_back(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile3)}}));
+ return supported_formats;
+#else
+ return std::vector<SdpVideoFormat>();
+#endif
+}
+
+std::unique_ptr<VP9Encoder> VP9Encoder::Create() {
+#ifdef RTC_ENABLE_VP9
+ return std::make_unique<LibvpxVp9Encoder>(cricket::VideoCodec(),
+ LibvpxInterface::Create(),
+ FieldTrialBasedConfig());
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+std::unique_ptr<VP9Encoder> VP9Encoder::Create(
+ const cricket::VideoCodec& codec) {
+#ifdef RTC_ENABLE_VP9
+ return std::make_unique<LibvpxVp9Encoder>(codec, LibvpxInterface::Create(),
+ FieldTrialBasedConfig());
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool VP9Encoder::SupportsScalabilityMode(ScalabilityMode scalability_mode) {
+ return ScalabilityStructureConfig(scalability_mode).has_value();
+}
+
+std::unique_ptr<VP9Decoder> VP9Decoder::Create() {
+#ifdef RTC_ENABLE_VP9
+ return std::make_unique<LibvpxVp9Decoder>();
+#else
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
new file mode 100644
index 0000000000..181550ce91
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifdef RTC_ENABLE_VP9
+
+#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_frame_buffer.h"
+
+namespace webrtc {
+
+uint8_t* Vp9FrameBufferPool::Vp9FrameBuffer::GetData() {
+ return data_.data<uint8_t>();
+}
+
+size_t Vp9FrameBufferPool::Vp9FrameBuffer::GetDataSize() const {
+ return data_.size();
+}
+
+void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
+ data_.SetSize(size);
+}
+
+bool Vp9FrameBufferPool::InitializeVpxUsePool(
+ vpx_codec_ctx* vpx_codec_context) {
+ RTC_DCHECK(vpx_codec_context);
+ // Tell libvpx to use this pool.
+ if (vpx_codec_set_frame_buffer_functions(
+ // In which context to use these callback functions.
+ vpx_codec_context,
+ // Called by libvpx when it needs another frame buffer.
+ &Vp9FrameBufferPool::VpxGetFrameBuffer,
+ // Called by libvpx when it no longer uses a frame buffer.
+ &Vp9FrameBufferPool::VpxReleaseFrameBuffer,
+ // `this` will be passed as `user_priv` to VpxGetFrameBuffer.
+ this)) {
+ // Failed to configure libvpx to use Vp9FrameBufferPool.
+ return false;
+ }
+ return true;
+}
+
+rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
+Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
+ RTC_DCHECK_GT(min_size, 0);
+ rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
+ {
+ MutexLock lock(&buffers_lock_);
+ // Do we have a buffer we can recycle?
+ for (const auto& buffer : allocated_buffers_) {
+ if (buffer->HasOneRef()) {
+ available_buffer = buffer;
+ break;
+ }
+ }
+ // Otherwise create one.
+ if (available_buffer == nullptr) {
+ available_buffer = new Vp9FrameBuffer();
+ allocated_buffers_.push_back(available_buffer);
+ if (allocated_buffers_.size() > max_num_buffers_) {
+ RTC_LOG(LS_WARNING)
+ << allocated_buffers_.size()
+ << " Vp9FrameBuffers have been "
+ "allocated by a Vp9FrameBufferPool (exceeding what is "
+ "considered reasonable, "
+ << max_num_buffers_ << ").";
+
+ // TODO(phoglund): this limit is being hit in tests since Oct 5 2016.
+ // See https://bugs.chromium.org/p/webrtc/issues/detail?id=6484.
+ // RTC_DCHECK_NOTREACHED();
+ }
+ }
+ }
+
+ available_buffer->SetSize(min_size);
+ return available_buffer;
+}
+
+int Vp9FrameBufferPool::GetNumBuffersInUse() const {
+ int num_buffers_in_use = 0;
+ MutexLock lock(&buffers_lock_);
+ for (const auto& buffer : allocated_buffers_) {
+ if (!buffer->HasOneRef())
+ ++num_buffers_in_use;
+ }
+ return num_buffers_in_use;
+}
+
+bool Vp9FrameBufferPool::Resize(size_t max_number_of_buffers) {
+ MutexLock lock(&buffers_lock_);
+ size_t used_buffers_count = 0;
+ for (const auto& buffer : allocated_buffers_) {
+ // If the buffer is in use, the ref count will be >= 2, one from the list we
+ // are looping over and one from the application. If the ref count is 1,
+ // then the list we are looping over holds the only reference and it's safe
+ // to reuse.
+ if (!buffer->HasOneRef()) {
+ used_buffers_count++;
+ }
+ }
+ if (used_buffers_count > max_number_of_buffers) {
+ return false;
+ }
+ max_num_buffers_ = max_number_of_buffers;
+
+ size_t buffers_to_purge = allocated_buffers_.size() - max_num_buffers_;
+ auto iter = allocated_buffers_.begin();
+ while (iter != allocated_buffers_.end() && buffers_to_purge > 0) {
+ if ((*iter)->HasOneRef()) {
+ iter = allocated_buffers_.erase(iter);
+ buffers_to_purge--;
+ } else {
+ ++iter;
+ }
+ }
+ return true;
+}
+
+void Vp9FrameBufferPool::ClearPool() {
+ MutexLock lock(&buffers_lock_);
+ allocated_buffers_.clear();
+}
+
+// static
+int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
+ size_t min_size,
+ vpx_codec_frame_buffer* fb) {
+ RTC_DCHECK(user_priv);
+ RTC_DCHECK(fb);
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+  // Limit to twice the size of an 8K YUV 4:2:0 frame; the factor of two
+  // allows for high-bit-depth data.
+ size_t size_limit = 7680 * 4320 * 3 / 2 * 2;
+ if (min_size > size_limit)
+ return -1;
+#endif
+
+ Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
+
+ rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
+ fb->data = buffer->GetData();
+ fb->size = buffer->GetDataSize();
+ // Store Vp9FrameBuffer* in `priv` for use in VpxReleaseFrameBuffer.
+ // This also makes vpx_codec_get_frame return images with their `fb_priv` set
+ // to `buffer` which is important for external reference counting.
+ // Release from refptr so that the buffer's `ref_count_` remains 1 when
+ // `buffer` goes out of scope.
+ fb->priv = static_cast<void*>(buffer.release());
+ return 0;
+}
+
+// static
+int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
+ vpx_codec_frame_buffer* fb) {
+ RTC_DCHECK(user_priv);
+ RTC_DCHECK(fb);
+ Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
+ if (buffer != nullptr) {
+ buffer->Release();
+    // When libvpx fails to decode and decoding is attempted again (and fails
+    // again), libvpx can try to release the same buffer multiple times.
+    // Setting `priv` to null protects against releasing it more than once.
+ fb->priv = nullptr;
+ }
+ return 0;
+}
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
new file mode 100644
index 0000000000..f46f1b7ea2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_VP9_FRAME_BUFFER_POOL_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_VP9_FRAME_BUFFER_POOL_H_
+
+#ifdef RTC_ENABLE_VP9
+
+#include <vector>
+
+#include "api/ref_counted_base.h"
+#include "api/scoped_refptr.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/synchronization/mutex.h"
+
+struct vpx_codec_ctx;
+struct vpx_codec_frame_buffer;
+
+namespace webrtc {
+
+// If more buffers than this are allocated we print warnings and crash if in
+// debug mode. VP9 is defined to have 8 reference buffers, of which 3 can be
+// referenced by any frame, see
+// https://tools.ietf.org/html/draft-grange-vp9-bitstream-00#section-2.2.2.
+// Assuming VP9 holds on to at most 8 buffers, any buffers beyond that must
+// be held by application code. Decoded frames should not be
+// referenced for longer than necessary. If we allow ~60 additional buffers
+// then the application has ~1 second to e.g. render each frame of a 60 fps
+// video.
+constexpr size_t kDefaultMaxNumBuffers = 68;
+
+// This memory pool is used to serve buffers to libvpx for decoding purposes in
+// VP9, which is set up in InitializeVpxUsePool. After the initialization any
+// time libvpx wants to decode a frame it will use buffers provided and released
+// through VpxGetFrameBuffer and VpxReleaseFrameBuffer.
+// The benefit of owning the pool that libvpx relies on for decoding is that the
+// decoded frames returned by libvpx (from vpx_codec_get_frame) use parts of our
+// buffers for the decoded image data. By retaining ownership of this buffer
+// using scoped_refptr, the image buffer can be reused by VideoFrames and no
+// frame copy has to occur during decoding and frame delivery.
+//
+// Pseudo example usage case:
+// Vp9FrameBufferPool pool;
+// pool.InitializeVpxUsePool(decoder_ctx);
+// ...
+//
+// // During decoding, libvpx will get and release buffers from the pool.
+// vpx_codec_decode(decoder_ctx, ...);
+//
+// vpx_image_t* img = vpx_codec_get_frame(decoder_ctx, &iter);
+// // Important to use scoped_refptr to protect it against being recycled by
+// // the pool.
+// scoped_refptr<Vp9FrameBuffer> img_buffer = (Vp9FrameBuffer*)img->fb_priv;
+// ...
+//
+// // Destroying the codec will make libvpx release any buffers it was using.
+// vpx_codec_destroy(decoder_ctx);
+class Vp9FrameBufferPool {
+ public:
+ class Vp9FrameBuffer final
+ : public rtc::RefCountedNonVirtual<Vp9FrameBuffer> {
+ public:
+ uint8_t* GetData();
+ size_t GetDataSize() const;
+ void SetSize(size_t size);
+
+ using rtc::RefCountedNonVirtual<Vp9FrameBuffer>::HasOneRef;
+
+ private:
+ // Data as an easily resizable buffer.
+ rtc::Buffer data_;
+ };
+
+ // Configures libvpx to, in the specified context, use this memory pool for
+ // buffers used to decompress frames. This is only supported for VP9.
+ bool InitializeVpxUsePool(vpx_codec_ctx* vpx_codec_context);
+
+ // Gets a frame buffer of at least `min_size`, recycling an available one or
+ // creating a new one. When no longer referenced from the outside the buffer
+ // becomes recyclable.
+ rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
+ // Gets the number of buffers currently in use (not ready to be recycled).
+ int GetNumBuffersInUse() const;
+  // Changes the maximum number of buffers in the pool to the new value.
+  // Returns true on success and false if the number of buffers currently in
+  // use is larger than the new value.
+ bool Resize(size_t max_number_of_buffers);
+ // Releases allocated buffers, deleting available buffers. Buffers in use are
+ // not deleted until they are no longer referenced.
+ void ClearPool();
+
+ // InitializeVpxUsePool configures libvpx to call this function when it needs
+ // a new frame buffer. Parameters:
+ // `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
+ // to be a pointer to the pool.
+ // `min_size` Minimum size needed by libvpx (to decompress a frame).
+ // `fb` Pointer to the libvpx frame buffer object, this is updated to
+ // use the pool's buffer.
+ // Returns 0 on success. Returns < 0 on failure.
+ static int32_t VpxGetFrameBuffer(void* user_priv,
+ size_t min_size,
+ vpx_codec_frame_buffer* fb);
+
+  // InitializeVpxUsePool configures libvpx to call this function when it has
+  // finished using one of the pool's frame buffers. Parameters:
+ // `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
+ // to be a pointer to the pool.
+ // `fb` Pointer to the libvpx frame buffer object, its `priv` will be
+ // a pointer to one of the pool's Vp9FrameBuffer.
+ static int32_t VpxReleaseFrameBuffer(void* user_priv,
+ vpx_codec_frame_buffer* fb);
+
+ private:
+ // Protects `allocated_buffers_`.
+ mutable Mutex buffers_lock_;
+ // All buffers, in use or ready to be recycled.
+ std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
+ RTC_GUARDED_BY(buffers_lock_);
+ size_t max_num_buffers_ = kDefaultMaxNumBuffers;
+};
+
+} // namespace webrtc
+
+#endif // RTC_ENABLE_VP9
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP9_VP9_FRAME_BUFFER_POOL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/decoder_database.cc b/third_party/libwebrtc/modules/video_coding/decoder_database.cc
new file mode 100644
index 0000000000..3410edc624
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoder_database.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoder_database.h"
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+VCMDecoderDatabase::VCMDecoderDatabase() {
+ decoder_sequence_checker_.Detach();
+}
+
+void VCMDecoderDatabase::DeregisterExternalDecoder(uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ auto it = decoders_.find(payload_type);
+ if (it == decoders_.end()) {
+ return;
+ }
+
+ // We can't use payload_type to check if the decoder is currently in use,
+ // because payload type may be out of date (e.g. before we decode the first
+ // frame after RegisterReceiveCodec).
+ if (current_decoder_ && current_decoder_->IsSameDecoder(it->second.get())) {
+ // Release it if it was registered and in use.
+ current_decoder_ = absl::nullopt;
+ }
+ decoders_.erase(it);
+}
+
+// Add the external decoder object to the list of external decoders.
+// Won't be registered as a receive codec until RegisterReceiveCodec is called.
+void VCMDecoderDatabase::RegisterExternalDecoder(
+ uint8_t payload_type,
+ std::unique_ptr<VideoDecoder> external_decoder) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ // If payload value already exists, erase old and insert new.
+ DeregisterExternalDecoder(payload_type);
+ if (external_decoder) {
+ decoders_.emplace(
+ std::make_pair(payload_type, std::move(external_decoder)));
+ }
+}
+
+bool VCMDecoderDatabase::IsExternalDecoderRegistered(
+ uint8_t payload_type) const {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ return decoders_.find(payload_type) != decoders_.end();
+}
+
+void VCMDecoderDatabase::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+ // If payload value already exists, erase old and insert new.
+ if (payload_type == current_payload_type_) {
+ current_payload_type_ = absl::nullopt;
+ }
+ decoder_settings_[payload_type] = settings;
+}
+
+bool VCMDecoderDatabase::DeregisterReceiveCodec(uint8_t payload_type) {
+ if (decoder_settings_.erase(payload_type) == 0) {
+ return false;
+ }
+ if (payload_type == current_payload_type_) {
+ // This codec is currently in use.
+ current_payload_type_ = absl::nullopt;
+ }
+ return true;
+}
+
+void VCMDecoderDatabase::DeregisterReceiveCodecs() {
+ current_payload_type_ = absl::nullopt;
+ decoder_settings_.clear();
+}
+
+VCMGenericDecoder* VCMDecoderDatabase::GetDecoder(
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ RTC_DCHECK(decoded_frame_callback->UserReceiveCallback());
+ uint8_t payload_type = frame.PayloadType();
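+  // A payload type of 0 is treated as matching the current decoder.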
+ if (payload_type == current_payload_type_ || payload_type == 0) {
+ return current_decoder_.has_value() ? &*current_decoder_ : nullptr;
+ }
+  // If a different decoder is active, release it.
+ if (current_decoder_.has_value()) {
+ current_decoder_ = absl::nullopt;
+ current_payload_type_ = absl::nullopt;
+ }
+
+ CreateAndInitDecoder(frame);
+ if (current_decoder_ == absl::nullopt) {
+ return nullptr;
+ }
+
+ VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
+ callback->OnIncomingPayloadType(payload_type);
+ if (current_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
+ 0) {
+ current_decoder_ = absl::nullopt;
+ return nullptr;
+ }
+
+ current_payload_type_ = payload_type;
+ return &*current_decoder_;
+}
+
+void VCMDecoderDatabase::CreateAndInitDecoder(const VCMEncodedFrame& frame) {
+ uint8_t payload_type = frame.PayloadType();
+ RTC_DLOG(LS_INFO) << "Initializing decoder with payload type '"
+ << int{payload_type} << "'.";
+ auto decoder_item = decoder_settings_.find(payload_type);
+ if (decoder_item == decoder_settings_.end()) {
+ RTC_LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
+ << int{payload_type};
+ return;
+ }
+ auto external_dec_item = decoders_.find(payload_type);
+ if (external_dec_item == decoders_.end()) {
+ RTC_LOG(LS_ERROR) << "No decoder of this type exists.";
+ return;
+ }
+ current_decoder_.emplace(external_dec_item->second.get());
+
+ // Copy over input resolutions to prevent codec reinitialization due to
+ // the first frame being of a different resolution than the database values.
+ // This is best effort, since there's no guarantee that width/height have been
+ // parsed yet (and may be zero).
+ RenderResolution frame_resolution(frame.EncodedImage()._encodedWidth,
+ frame.EncodedImage()._encodedHeight);
+ if (frame_resolution.Valid()) {
+ decoder_item->second.set_max_render_resolution(frame_resolution);
+ }
+ if (!current_decoder_->Configure(decoder_item->second)) {
+ current_decoder_ = absl::nullopt;
+ RTC_LOG(LS_ERROR) << "Failed to initialize decoder.";
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/decoder_database.h b/third_party/libwebrtc/modules/video_coding/decoder_database.h
new file mode 100644
index 0000000000..98f4335621
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoder_database.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_DECODER_DATABASE_H_
+#define MODULES_VIDEO_CODING_DECODER_DATABASE_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+
+namespace webrtc {
+
+class VCMDecoderDatabase {
+ public:
+ VCMDecoderDatabase();
+ VCMDecoderDatabase(const VCMDecoderDatabase&) = delete;
+ VCMDecoderDatabase& operator=(const VCMDecoderDatabase&) = delete;
+ ~VCMDecoderDatabase() = default;
+
+  // Removes the external decoder registered for `payload_type`, if any. If
+  // that decoder is currently in use, it is released as well.
+ void DeregisterExternalDecoder(uint8_t payload_type);
+ void RegisterExternalDecoder(uint8_t payload_type,
+ std::unique_ptr<VideoDecoder> external_decoder);
+ bool IsExternalDecoderRegistered(uint8_t payload_type) const;
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings);
+ bool DeregisterReceiveCodec(uint8_t payload_type);
+ void DeregisterReceiveCodecs();
+
+ // Returns a decoder specified by frame.PayloadType. The decoded frame
+ // callback of the decoder is set to `decoded_frame_callback`. If no such
+ // decoder already exists an instance will be created and initialized.
+ // nullptr is returned if no decoder with the specified payload type was found
+ // and the function failed to create one.
+ VCMGenericDecoder* GetDecoder(
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback);
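+  //
+  // Sketch of a typical call sequence (names illustrative):
+  //   db.RegisterExternalDecoder(payload_type, std::move(decoder));
+  //   db.RegisterReceiveCodec(payload_type, settings);
+  //   VCMGenericDecoder* generic = db.GetDecoder(frame, &callback);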
+
+ private:
+ void CreateAndInitDecoder(const VCMEncodedFrame& frame)
+ RTC_RUN_ON(decoder_sequence_checker_);
+
+ SequenceChecker decoder_sequence_checker_;
+
+ absl::optional<uint8_t> current_payload_type_;
+ absl::optional<VCMGenericDecoder> current_decoder_
+ RTC_GUARDED_BY(decoder_sequence_checker_);
+  // Initialization parameters for decoders keyed by payload type.
+ std::map<uint8_t, VideoDecoder::Settings> decoder_settings_;
+ // Decoders keyed by payload type.
+ std::map<uint8_t, std::unique_ptr<VideoDecoder>> decoders_
+ RTC_GUARDED_BY(decoder_sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_DECODER_DATABASE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/decoder_database_unittest.cc b/third_party/libwebrtc/modules/video_coding/decoder_database_unittest.cc
new file mode 100644
index 0000000000..2e9c91b1c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoder_database_unittest.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoder_database.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/test/mock_video_decoder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::NiceMock;
+
+// Test registering and unregistering an external decoder instance.
+TEST(VCMDecoderDatabaseTest, RegisterExternalDecoder) {
+ VCMDecoderDatabase db;
+ constexpr int kPayloadType = 1;
+ ASSERT_FALSE(db.IsExternalDecoderRegistered(kPayloadType));
+
+ auto decoder = std::make_unique<NiceMock<MockVideoDecoder>>();
+ bool decoder_deleted = false;
+ EXPECT_CALL(*decoder, Destruct).WillOnce([&decoder_deleted] {
+ decoder_deleted = true;
+ });
+
+ db.RegisterExternalDecoder(kPayloadType, std::move(decoder));
+ EXPECT_TRUE(db.IsExternalDecoderRegistered(kPayloadType));
+ db.DeregisterExternalDecoder(kPayloadType);
+ EXPECT_TRUE(decoder_deleted);
+ EXPECT_FALSE(db.IsExternalDecoderRegistered(kPayloadType));
+}
+
+TEST(VCMDecoderDatabaseTest, RegisterReceiveCodec) {
+ VCMDecoderDatabase db;
+ constexpr int kPayloadType = 1;
+ ASSERT_FALSE(db.DeregisterReceiveCodec(kPayloadType));
+
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(kVideoCodecVP8);
+ settings.set_max_render_resolution({10, 10});
+ settings.set_number_of_cores(4);
+ db.RegisterReceiveCodec(kPayloadType, settings);
+
+ EXPECT_TRUE(db.DeregisterReceiveCodec(kPayloadType));
+}
+
+TEST(VCMDecoderDatabaseTest, DeregisterReceiveCodecs) {
+ VCMDecoderDatabase db;
+ constexpr int kPayloadType1 = 1;
+ constexpr int kPayloadType2 = 2;
+ ASSERT_FALSE(db.DeregisterReceiveCodec(kPayloadType1));
+ ASSERT_FALSE(db.DeregisterReceiveCodec(kPayloadType2));
+
+ VideoDecoder::Settings settings1;
+ settings1.set_codec_type(kVideoCodecVP8);
+ settings1.set_max_render_resolution({10, 10});
+ settings1.set_number_of_cores(4);
+
+ VideoDecoder::Settings settings2 = settings1;
+ settings2.set_codec_type(kVideoCodecVP9);
+
+ db.RegisterReceiveCodec(kPayloadType1, settings1);
+ db.RegisterReceiveCodec(kPayloadType2, settings2);
+
+ db.DeregisterReceiveCodecs();
+
+ // All receive codecs must have been removed.
+ EXPECT_FALSE(db.DeregisterReceiveCodec(kPayloadType1));
+ EXPECT_FALSE(db.DeregisterReceiveCodec(kPayloadType2));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/decoding_state.cc b/third_party/libwebrtc/modules/video_coding/decoding_state.cc
new file mode 100644
index 0000000000..5e405cbd05
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoding_state.cc
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoding_state.h"
+
+#include "common_video/h264/h264_common.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+VCMDecodingState::VCMDecodingState()
+ : sequence_num_(0),
+ time_stamp_(0),
+ picture_id_(kNoPictureId),
+ temporal_id_(kNoTemporalIdx),
+ tl0_pic_id_(kNoTl0PicIdx),
+ full_sync_(true),
+ in_initial_state_(true) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+}
+
+VCMDecodingState::~VCMDecodingState() {}
+
+void VCMDecodingState::Reset() {
+  // TODO(mikhal): Verify - we may not always want to reset the sync.
+ sequence_num_ = 0;
+ time_stamp_ = 0;
+ picture_id_ = kNoPictureId;
+ temporal_id_ = kNoTemporalIdx;
+ tl0_pic_id_ = kNoTl0PicIdx;
+ full_sync_ = true;
+ in_initial_state_ = true;
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+ received_sps_.clear();
+ received_pps_.clear();
+}
+
+uint32_t VCMDecodingState::time_stamp() const {
+ return time_stamp_;
+}
+
+uint16_t VCMDecodingState::sequence_num() const {
+ return sequence_num_;
+}
+
+bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
+ RTC_DCHECK(frame);
+ if (in_initial_state_)
+ return false;
+ return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
+}
+
+bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
+ RTC_DCHECK(packet);
+ if (in_initial_state_)
+ return false;
+ return !IsNewerTimestamp(packet->timestamp, time_stamp_);
+}
+
+void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
+ RTC_DCHECK(frame);
+ RTC_CHECK_GE(frame->GetHighSeqNum(), 0);
+ if (!UsingFlexibleMode(frame))
+ UpdateSyncState(frame);
+ sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
+ time_stamp_ = frame->Timestamp();
+ picture_id_ = frame->PictureId();
+ temporal_id_ = frame->TemporalId();
+ tl0_pic_id_ = frame->Tl0PicId();
+
+ for (const NaluInfo& nalu : frame->GetNaluInfos()) {
+ if (nalu.type == H264::NaluType::kPps) {
+ if (nalu.pps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without pps id.";
+ } else if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without sps id.";
+ } else {
+ received_pps_[nalu.pps_id] = nalu.sps_id;
+ }
+ } else if (nalu.type == H264::NaluType::kSps) {
+ if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received sps without sps id.";
+ } else {
+ received_sps_.insert(nalu.sps_id);
+ }
+ }
+ }
+
+ if (UsingFlexibleMode(frame)) {
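+    // Track decoded picture ids in a ring buffer so that
+    // ContinuousFrameRefs() can later verify that every referenced frame was
+    // decoded.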
+ uint16_t frame_index = picture_id_ % kFrameDecodedLength;
+ if (in_initial_state_) {
+ frame_decoded_cleared_to_ = frame_index;
+ } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+ frame_decoded_cleared_to_ = frame_index;
+ } else {
+ if (AheadOfFramesDecodedClearedTo(frame_index)) {
+ while (frame_decoded_cleared_to_ != frame_index) {
+ frame_decoded_cleared_to_ =
+ (frame_decoded_cleared_to_ + 1) % kFrameDecodedLength;
+ frame_decoded_[frame_decoded_cleared_to_] = false;
+ }
+ }
+ }
+ frame_decoded_[frame_index] = true;
+ }
+
+ in_initial_state_ = false;
+}
+
+void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
+ sequence_num_ = state.sequence_num_;
+ time_stamp_ = state.time_stamp_;
+ picture_id_ = state.picture_id_;
+ temporal_id_ = state.temporal_id_;
+ tl0_pic_id_ = state.tl0_pic_id_;
+ full_sync_ = state.full_sync_;
+ in_initial_state_ = state.in_initial_state_;
+ frame_decoded_cleared_to_ = state.frame_decoded_cleared_to_;
+ memcpy(frame_decoded_, state.frame_decoded_, sizeof(frame_decoded_));
+ received_sps_ = state.received_sps_;
+ received_pps_ = state.received_pps_;
+}
+
+bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
+ bool empty_packet = frame->GetHighSeqNum() == frame->GetLowSeqNum();
+ if (in_initial_state_ && empty_packet) {
+ // Drop empty packets as long as we are in the initial state.
+ return true;
+ }
+ if ((empty_packet && ContinuousSeqNum(frame->GetHighSeqNum())) ||
+ ContinuousFrame(frame)) {
+ // Continuous empty packets or continuous frames can be dropped if we
+ // advance the sequence number.
+ sequence_num_ = frame->GetHighSeqNum();
+ time_stamp_ = frame->Timestamp();
+ return true;
+ }
+ return false;
+}
+
+void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) {
+ RTC_DCHECK(packet);
+ if (packet->timestamp == time_stamp_) {
+ // Late packet belonging to the last decoded frame - make sure we update the
+ // last decoded sequence number.
+ sequence_num_ = LatestSequenceNumber(packet->seqNum, sequence_num_);
+ }
+}
+
+void VCMDecodingState::SetSeqNum(uint16_t new_seq_num) {
+ sequence_num_ = new_seq_num;
+}
+
+bool VCMDecodingState::in_initial_state() const {
+ return in_initial_state_;
+}
+
+bool VCMDecodingState::full_sync() const {
+ return full_sync_;
+}
+
+void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
+ if (in_initial_state_)
+ return;
+ if (frame->TemporalId() == kNoTemporalIdx ||
+ frame->Tl0PicId() == kNoTl0PicIdx) {
+ full_sync_ = true;
+ } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey ||
+ frame->LayerSync()) {
+ full_sync_ = true;
+ } else if (full_sync_) {
+ // Verify that we are still in sync.
+ // Sync will be broken if continuity is true for layers but not for the
+ // other methods (PictureId and SeqNum).
+ if (UsingPictureId(frame)) {
+ // First check for a valid tl0PicId.
+ if (frame->Tl0PicId() - tl0_pic_id_ > 1) {
+ full_sync_ = false;
+ } else {
+ full_sync_ = ContinuousPictureId(frame->PictureId());
+ }
+ } else {
+ full_sync_ =
+ ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
+ }
+ }
+}
+
+bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
+ // Check continuity based on the following hierarchy:
+ // - Temporal layers (stop here if out of sync).
+ // - Picture Id when available.
+ // - Sequence numbers.
+ // Return true when in initial state.
+ // Note that when a method is not applicable it will return false.
+ RTC_DCHECK(frame);
+ // A key frame is always considered continuous as it doesn't refer to any
+ // frames and therefore won't introduce any errors even if prior frames are
+ // missing.
+ if (frame->FrameType() == VideoFrameType::kVideoFrameKey &&
+ HaveSpsAndPps(frame->GetNaluInfos())) {
+ return true;
+ }
+ // When in the initial state we always require a key frame to start decoding.
+ if (in_initial_state_)
+ return false;
+ if (ContinuousLayer(frame->TemporalId(), frame->Tl0PicId()))
+ return true;
+ // tl0picId is either not used, or should remain unchanged.
+ if (frame->Tl0PicId() != tl0_pic_id_)
+ return false;
+ // Base layers are not continuous or temporal layers are inactive.
+ // In the presence of temporal layers, check for Picture ID/sequence number
+ // continuity if sync can be restored by this frame.
+ if (!full_sync_ && !frame->LayerSync())
+ return false;
+ if (UsingPictureId(frame)) {
+ if (UsingFlexibleMode(frame)) {
+ return ContinuousFrameRefs(frame);
+ } else {
+ return ContinuousPictureId(frame->PictureId());
+ }
+ } else {
+ return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum())) &&
+ HaveSpsAndPps(frame->GetNaluInfos());
+ }
+}
+
+bool VCMDecodingState::ContinuousPictureId(int picture_id) const {
+ int next_picture_id = picture_id_ + 1;
+ if (picture_id < picture_id_) {
+ // Wrap
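+    // A stored id >= 0x80 can only occur with 15-bit picture ids, since
+    // 7-bit ids never exceed 0x7F.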
+ if (picture_id_ >= 0x80) {
+ // 15 bits used for picture id
+ return ((next_picture_id & 0x7FFF) == picture_id);
+ } else {
+ // 7 bits used for picture id
+ return ((next_picture_id & 0x7F) == picture_id);
+ }
+ }
+ // No wrap
+ return (next_picture_id == picture_id);
+}
+
+bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
+ return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
+}
+
+bool VCMDecodingState::ContinuousLayer(int temporal_id, int tl0_pic_id) const {
+ // First, check if applicable.
+ if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
+ return false;
+ // If this is the first frame to use temporal layers, make sure we start
+ // from base.
+ else if (tl0_pic_id_ == kNoTl0PicIdx && temporal_id_ == kNoTemporalIdx &&
+ temporal_id == 0)
+ return true;
+
+ // Current implementation: Look for base layer continuity.
+ if (temporal_id != 0)
+ return false;
+ return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
+}
+
+bool VCMDecodingState::ContinuousFrameRefs(const VCMFrameBuffer* frame) const {
+ uint8_t num_refs = frame->CodecSpecific()->codecSpecific.VP9.num_ref_pics;
+ for (uint8_t r = 0; r < num_refs; ++r) {
+ uint16_t frame_ref = frame->PictureId() -
+ frame->CodecSpecific()->codecSpecific.VP9.p_diff[r];
+ uint16_t frame_index = frame_ref % kFrameDecodedLength;
+ if (AheadOfFramesDecodedClearedTo(frame_index) ||
+ !frame_decoded_[frame_index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
+ return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
+}
+
+bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
+ bool is_flexible_mode =
+ frame->CodecSpecific()->codecType == kVideoCodecVP9 &&
+ frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
+ if (is_flexible_mode && frame->PictureId() == kNoPictureId) {
+ RTC_LOG(LS_WARNING) << "Frame is marked as using flexible mode but no"
+ "picture id is set.";
+ return false;
+ }
+ return is_flexible_mode;
+}
+
+// TODO(philipel): Change how this check works; it practically
+// limits the max p_diff to 64.
+bool VCMDecodingState::AheadOfFramesDecodedClearedTo(uint16_t index) const {
+ // No way of knowing for sure if we are actually ahead of
+ // frame_decoded_cleared_to_. We just make the assumption
+ // that we are not trying to reference back to a very old
+ // index, but instead are referencing a newer index.
+ uint16_t diff =
+ index > frame_decoded_cleared_to_
+ ? kFrameDecodedLength - (index - frame_decoded_cleared_to_)
+ : frame_decoded_cleared_to_ - index;
+ return diff > kFrameDecodedLength / 2;
+}
+
+bool VCMDecodingState::HaveSpsAndPps(const std::vector<NaluInfo>& nalus) const {
+ std::set<int> new_sps;
+ std::map<int, int> new_pps;
+ for (const NaluInfo& nalu : nalus) {
+ // Check if this nalu actually contains sps/pps information or dependencies.
+ if (nalu.sps_id == -1 && nalu.pps_id == -1)
+ continue;
+ switch (nalu.type) {
+ case H264::NaluType::kPps:
+ if (nalu.pps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without pps id.";
+ } else if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received pps without sps id.";
+ } else {
+ new_pps[nalu.pps_id] = nalu.sps_id;
+ }
+ break;
+ case H264::NaluType::kSps:
+ if (nalu.sps_id < 0) {
+ RTC_LOG(LS_WARNING) << "Received sps without sps id.";
+ } else {
+ new_sps.insert(nalu.sps_id);
+ }
+ break;
+ default: {
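+        // Any other NALU with a dependency: resolve the PPS it references,
+        // then verify that the SPS that PPS points to has been seen, either
+        // in this frame or earlier.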
+ int needed_sps = -1;
+ auto pps_it = new_pps.find(nalu.pps_id);
+ if (pps_it != new_pps.end()) {
+ needed_sps = pps_it->second;
+ } else {
+ auto pps_it2 = received_pps_.find(nalu.pps_id);
+ if (pps_it2 == received_pps_.end()) {
+ return false;
+ }
+ needed_sps = pps_it2->second;
+ }
+ if (new_sps.find(needed_sps) == new_sps.end() &&
+ received_sps_.find(needed_sps) == received_sps_.end()) {
+ return false;
+ }
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/decoding_state.h b/third_party/libwebrtc/modules/video_coding/decoding_state.h
new file mode 100644
index 0000000000..ec972949d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoding_state.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_DECODING_STATE_H_
+#define MODULES_VIDEO_CODING_DECODING_STATE_H_
+
+#include <cstdint>
+#include <map>
+#include <set>
+#include <vector>
+
+namespace webrtc {
+
+// Forward declarations
+struct NaluInfo;
+class VCMFrameBuffer;
+class VCMPacket;
+
+class VCMDecodingState {
+ public:
+ // The max number of bits used to reference back
+ // to a previous frame when using flexible mode.
+ static const uint16_t kNumRefBits = 7;
+ static const uint16_t kFrameDecodedLength = 1 << kNumRefBits;
+
+ VCMDecodingState();
+ ~VCMDecodingState();
+ // Check for old frame
+ bool IsOldFrame(const VCMFrameBuffer* frame) const;
+ // Check for old packet
+ bool IsOldPacket(const VCMPacket* packet) const;
+ // Check for frame continuity based on current decoded state. Use best method
+ // possible, i.e. temporal info, picture ID or sequence number.
+ bool ContinuousFrame(const VCMFrameBuffer* frame) const;
+ void SetState(const VCMFrameBuffer* frame);
+ void CopyFrom(const VCMDecodingState& state);
+ bool UpdateEmptyFrame(const VCMFrameBuffer* frame);
+ // Update the sequence number if the timestamp matches current state and the
+ // sequence number is higher than the current one. This accounts for packets
+ // arriving late.
+ void UpdateOldPacket(const VCMPacket* packet);
+ void SetSeqNum(uint16_t new_seq_num);
+ void Reset();
+ uint32_t time_stamp() const;
+ uint16_t sequence_num() const;
+ // Return true if at initial state.
+ bool in_initial_state() const;
+ // Return true when sync is on - decode all layers.
+ bool full_sync() const;
+
+ private:
+ void UpdateSyncState(const VCMFrameBuffer* frame);
+ // Designated continuity functions
+ bool ContinuousPictureId(int picture_id) const;
+ bool ContinuousSeqNum(uint16_t seq_num) const;
+ bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
+ bool ContinuousFrameRefs(const VCMFrameBuffer* frame) const;
+ bool UsingPictureId(const VCMFrameBuffer* frame) const;
+ bool UsingFlexibleMode(const VCMFrameBuffer* frame) const;
+ bool AheadOfFramesDecodedClearedTo(uint16_t index) const;
+ bool HaveSpsAndPps(const std::vector<NaluInfo>& nalus) const;
+
+ // Keep state of last decoded frame.
+ // TODO(mikhal/stefan): create designated classes to handle these types.
+ uint16_t sequence_num_;
+ uint32_t time_stamp_;
+ int picture_id_;
+ int temporal_id_;
+ int tl0_pic_id_;
+ bool full_sync_; // Sync flag when temporal layers are used.
+ bool in_initial_state_;
+
+ // Used to check references in flexible mode.
+ bool frame_decoded_[kFrameDecodedLength];
+ uint16_t frame_decoded_cleared_to_;
+ std::set<int> received_sps_;
+ std::map<int, int> received_pps_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_DECODING_STATE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc b/third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc
new file mode 100644
index 0000000000..bef7f81c62
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/decoding_state_unittest.cc
@@ -0,0 +1,713 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/decoding_state.h"
+
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/session_info.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(TestDecodingState, Sanity) {
+ VCMDecodingState dec_state;
+ dec_state.Reset();
+ EXPECT_TRUE(dec_state.in_initial_state());
+ EXPECT_TRUE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, FrameContinuity) {
+ VCMDecodingState dec_state;
+  // Check that continuity decisions are based on the correct method.
+ VCMFrameBuffer frame;
+ VCMFrameBuffer frame_key;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.codec = kVideoCodecVP8;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.pictureId = 0x007F;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Always start with a key frame.
+ dec_state.Reset();
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame_key.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ // Use pictureId
+ packet.video_header.is_first_packet_in_frame = false;
+ vp8_header.pictureId = 0x0002;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ vp8_header.pictureId = 0;
+ packet.seqNum = 10;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Use sequence numbers.
+ vp8_header.pictureId = kNoPictureId;
+ frame.Reset();
+ packet.seqNum = dec_state.sequence_num() - 1u;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ packet.seqNum = dec_state.sequence_num() + 1u;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Insert another packet to this frame
+ packet.seqNum++;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Verify wrap.
+ EXPECT_LE(dec_state.sequence_num(), 0xffff);
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Insert packet with temporal info.
+ dec_state.Reset();
+ frame.Reset();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ packet.seqNum = 1;
+ packet.timestamp = 1;
+ EXPECT_TRUE(dec_state.full_sync());
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ // 1 layer up - still good.
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 1;
+ packet.seqNum = 2;
+ packet.timestamp = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ // Lost non-base layer packet => should update sync parameter.
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 3;
+ vp8_header.pictureId = 3;
+ packet.seqNum = 4;
+ packet.timestamp = 4;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ // Now insert the next non-base layer (belonging to a next tl0PicId).
+ frame.Reset();
+ vp8_header.tl0PicIdx = 1;
+ vp8_header.temporalIdx = 2;
+ vp8_header.pictureId = 4;
+ packet.seqNum = 5;
+ packet.timestamp = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ // Checking continuity and not updating the state - this should not trigger
+ // an update of sync state.
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ // Next base layer (dropped interim non-base layers) - should update sync.
+ frame.Reset();
+ vp8_header.tl0PicIdx = 1;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 5;
+ packet.seqNum = 6;
+ packet.timestamp = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+
+ // Check wrap for temporal layers.
+ frame.Reset();
+ vp8_header.tl0PicIdx = 0x00FF;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 6;
+ packet.seqNum = 7;
+ packet.timestamp = 7;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ frame.Reset();
+ vp8_header.tl0PicIdx = 0x0000;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 7;
+ packet.seqNum = 8;
+ packet.timestamp = 8;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ // After updating the state to this frame, the frame is no longer continuous.
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, UpdateOldPacket) {
+ VCMDecodingState dec_state;
+ // Update only if the packet has zero size and is newer than the previous one.
+ // The update should only happen if the timestamps match.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_EQ(dec_state.sequence_num(), 1);
+ // Insert an empty packet that does not belong to the same frame.
+ // => Sequence num should be the same.
+ packet.timestamp = 2;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 1);
+ // Now insert empty packet belonging to the same frame.
+ packet.timestamp = 1;
+ packet.seqNum = 2;
+ packet.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ packet.sizeBytes = 0;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 2);
+ // Now insert delta packet belonging to the same frame.
+ packet.timestamp = 1;
+ packet.seqNum = 3;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.sizeBytes = 1400;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 3);
+ // Insert a packet belonging to an older timestamp - should not update the
+ // sequence number.
+ packet.timestamp = 0;
+ packet.seqNum = 4;
+ packet.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ packet.sizeBytes = 0;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 3);
+}
+
+TEST(TestDecodingState, MultiLayerBehavior) {
+ // Identify sync/non-sync behavior when there is more than one layer.
+ VCMDecodingState dec_state;
+ // Set the state for the current frame.
+ // tl0PicIdx 0, temporal id 0.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.codec = kVideoCodecVP8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ // tl0PicIdx 0, temporal id 1.
+ frame.Reset();
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // Lost tl0PicIdx 0, temporal id 2.
+ // Insert tl0PicIdx 0, temporal id 3.
+ frame.Reset();
+ packet.timestamp = 3;
+ packet.seqNum = 3;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 3;
+ vp8_header.pictureId = 3;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ // Insert the next base layer.
+ frame.Reset();
+ packet.timestamp = 4;
+ packet.seqNum = 4;
+ vp8_header.tl0PicIdx = 1;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 4;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ // Insert key frame - should update sync value.
+ // A key frame is always a base layer.
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 5;
+ packet.seqNum = 5;
+ vp8_header.tl0PicIdx = 2;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // After sync, a continuous PictureId is required
+ // (a continuous base layer is not enough).
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.timestamp = 6;
+ packet.seqNum = 6;
+ vp8_header.tl0PicIdx = 3;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 8;
+ packet.seqNum = 8;
+ vp8_header.tl0PicIdx = 4;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 8;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+
+ // Insert a non-ref frame - should update sync value.
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 9;
+ packet.seqNum = 9;
+ vp8_header.tl0PicIdx = 4;
+ vp8_header.temporalIdx = 2;
+ vp8_header.pictureId = 9;
+ vp8_header.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+
+ // The following test will verify the sync flag behavior after a loss.
+ // Create the following pattern:
+ // Update base layer, lose packet 1 (sync flag on, layer 2), insert packet 3
+ // (sync flag on, layer 2) check continuity and sync flag after inserting
+ // packet 2 (sync flag on, layer 1).
+ // Base layer.
+ frame.Reset();
+ dec_state.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.markerBit = 1;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ vp8_header.layerSync = false;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // Layer 2 - 2 packets (insert one, lose one).
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.markerBit = 0;
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 2;
+ vp8_header.pictureId = 1;
+ vp8_header.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ // Layer 1
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.markerBit = 1;
+ packet.timestamp = 2;
+ packet.seqNum = 3;
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 2;
+ vp8_header.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet.video_header.codec = kVideoCodecVP8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+
+ // Continuous sequence number but discontinuous picture id. This implies a
+ // loss, and we have to fall back to only decoding the base layer.
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.timestamp += 3000;
+ ++packet.seqNum;
+ vp8_header.temporalIdx = 1;
+ vp8_header.pictureId = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, OldInput) {
+ VCMDecodingState dec_state;
+ // Identify packets belonging to old frames/packets.
+ // Set the state for the current frame.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.timestamp = 10;
+ packet.seqNum = 1;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ packet.timestamp = 9;
+ EXPECT_TRUE(dec_state.IsOldPacket(&packet));
+ // Check for an old frame.
+ frame.Reset();
+ frame.InsertPacket(packet, 0, frame_data);
+ EXPECT_TRUE(dec_state.IsOldFrame(&frame));
+}
+
+TEST(TestDecodingState, PictureIdRepeat) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet.video_header.codec = kVideoCodecVP8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ auto& vp8_header =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.tl0PicIdx = 0;
+ vp8_header.temporalIdx = 0;
+ vp8_header.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ // tl0PicIdx 0, temporal id 1.
+ frame.Reset();
+ ++packet.timestamp;
+ ++packet.seqNum;
+ vp8_header.temporalIdx++;
+ vp8_header.pictureId++;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ // Testing only a gap in tl0PicIdx while the pictureId stays continuous.
+ vp8_header.tl0PicIdx += 3;
+ vp8_header.temporalIdx++;
+ vp8_header.tl0PicIdx = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.video_header.codec = kVideoCodecVP9;
+
+ auto& vp9_hdr =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key frame again
+ vp9_hdr.picture_id = 11;
+ frame.Reset();
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.video_header.codec = kVideoCodecVP9;
+
+ auto& vp9_hdr =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 10, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Out of order, last id 15, this id 12, ref to 10, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.pid_diff[0] = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref 10, 12, 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 10;
+ vp9_hdr.pid_diff[1] = 8;
+ vp9_hdr.pid_diff[2] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.video_header.is_first_packet_in_frame = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.video_header.codec = kVideoCodecVP9;
+
+ auto& vp9_hdr =
+ packet.video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Delta frame as first frame
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame then delta frame
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11 and 15, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 2;
+ vp9_hdr.pid_diff[0] = 9;
+ vp9_hdr.pid_diff[1] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 10, 15 and 16, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 22;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 12;
+ vp9_hdr.pid_diff[1] = 7;
+ vp9_hdr.pid_diff[2] = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key Frame, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame at last index, ref to KF, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping buffer length, ref to last index, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 0;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 0, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 20;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 10, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 23;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 13;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_hdr.picture_id = 25;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to KF, continuous
+ frame.Reset();
+ packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_hdr.picture_id = 26;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to frame previous to KF, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 30;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 30;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+} // namespace webrtc
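
The tests above exercise two continuity rules: compare VP8 picture ids when they are present, otherwise fall back to RTP sequence numbers, with both comparisons surviving wrap-around (the first test wraps a 7-bit picture id at 0x7F and a sequence number at 0xFFFF). In VP9 flexible mode the later tests instead require every referenced picture (picture_id minus pid_diff[i]) to have been decoded. A minimal sketch of the two VP8-era rules, with hypothetical helper names rather than the upstream implementation:

    #include <cstdint>

    // Hypothetical simplification; VP8 picture ids are 7 or 15 bits on the
    // wire, so the wrap mask depends on the negotiated form.
    bool IsContinuousPictureId(int last_id, int id, int id_bits) {
      const int mask = (1 << id_bits) - 1;  // 0x7F or 0x7FFF
      return id == ((last_id + 1) & mask);
    }

    bool IsContinuousSeqNum(uint16_t last_seq, uint16_t first_seq) {
      // uint16_t arithmetic wraps 0xFFFF + 1 to 0, as the tests rely on.
      return first_seq == static_cast<uint16_t>(last_seq + 1);
    }

Under the 7-bit mask, pictureId 0 right after 0x7F is continuous, which is exactly what the first test asserts.
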
diff --git a/third_party/libwebrtc/modules/video_coding/encoded_frame.cc b/third_party/libwebrtc/modules/video_coding/encoded_frame.cc
new file mode 100644
index 0000000000..637a20cfc9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/encoded_frame.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/encoded_frame.h"
+
+#include <string.h>
+
+#include "absl/types/variant.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+
+namespace webrtc {
+
+VCMEncodedFrame::VCMEncodedFrame()
+ : webrtc::EncodedImage(),
+ _renderTimeMs(-1),
+ _payloadType(0),
+ _missingFrame(false),
+ _codec(kVideoCodecGeneric) {
+ _codecSpecificInfo.codecType = kVideoCodecGeneric;
+}
+
+VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame&) = default;
+
+VCMEncodedFrame::~VCMEncodedFrame() {
+ Reset();
+}
+
+void VCMEncodedFrame::Reset() {
+ SetTimestamp(0);
+ SetSpatialIndex(absl::nullopt);
+ _renderTimeMs = -1;
+ _payloadType = 0;
+ _frameType = VideoFrameType::kVideoFrameDelta;
+ _encodedWidth = 0;
+ _encodedHeight = 0;
+ _missingFrame = false;
+ set_size(0);
+ _codecSpecificInfo.codecType = kVideoCodecGeneric;
+ _codec = kVideoCodecGeneric;
+ rotation_ = kVideoRotation_0;
+ content_type_ = VideoContentType::UNSPECIFIED;
+ timing_.flags = VideoSendTiming::kInvalid;
+}
+
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
+ if (header) {
+ switch (header->codec) {
+ case kVideoCodecVP8: {
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(header->video_type_header);
+ if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
+ _codecSpecificInfo.codecSpecific.VP8.layerSync = false;
+ _codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
+ _codecSpecificInfo.codecType = kVideoCodecVP8;
+ }
+ _codecSpecificInfo.codecSpecific.VP8.nonReference =
+ vp8_header.nonReference;
+ if (vp8_header.temporalIdx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP8.temporalIdx =
+ vp8_header.temporalIdx;
+ _codecSpecificInfo.codecSpecific.VP8.layerSync = vp8_header.layerSync;
+ }
+ if (vp8_header.keyIdx != kNoKeyIdx) {
+ _codecSpecificInfo.codecSpecific.VP8.keyIdx = vp8_header.keyIdx;
+ }
+ break;
+ }
+ case kVideoCodecVP9: {
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(header->video_type_header);
+ if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted = false;
+ _codecSpecificInfo.codecType = kVideoCodecVP9;
+ }
+ _codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
+ vp9_header.inter_pic_predicted;
+ _codecSpecificInfo.codecSpecific.VP9.flexible_mode =
+ vp9_header.flexible_mode;
+ _codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
+ vp9_header.num_ref_pics;
+ for (uint8_t r = 0; r < vp9_header.num_ref_pics; ++r) {
+ _codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
+ vp9_header.pid_diff[r];
+ }
+ _codecSpecificInfo.codecSpecific.VP9.ss_data_available =
+ vp9_header.ss_data_available;
+ if (vp9_header.temporal_idx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ vp9_header.temporal_idx;
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ vp9_header.temporal_up_switch;
+ }
+ if (vp9_header.spatial_idx != kNoSpatialIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
+ vp9_header.inter_layer_predicted;
+ SetSpatialIndex(vp9_header.spatial_idx);
+ }
+ if (vp9_header.gof_idx != kNoGofIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx = vp9_header.gof_idx;
+ }
+ if (vp9_header.ss_data_available) {
+ _codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
+ vp9_header.num_spatial_layers;
+ _codecSpecificInfo.codecSpecific.VP9
+ .spatial_layer_resolution_present =
+ vp9_header.spatial_layer_resolution_present;
+ if (vp9_header.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < vp9_header.num_spatial_layers; ++i) {
+ _codecSpecificInfo.codecSpecific.VP9.width[i] =
+ vp9_header.width[i];
+ _codecSpecificInfo.codecSpecific.VP9.height[i] =
+ vp9_header.height[i];
+ }
+ }
+ _codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
+ vp9_header.gof);
+ }
+ break;
+ }
+ case kVideoCodecH264: {
+ _codecSpecificInfo.codecType = kVideoCodecH264;
+ break;
+ }
+ case kVideoCodecAV1: {
+ _codecSpecificInfo.codecType = kVideoCodecAV1;
+ break;
+ }
+ default: {
+ _codecSpecificInfo.codecType = kVideoCodecGeneric;
+ break;
+ }
+ }
+ }
+}
+
+} // namespace webrtc
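
CopyCodecSpecific above reads the codec-specific struct out of the absl::variant stored in RTPVideoHeader::video_type_header; the unit tests earlier in this diff write it with emplace<>(). A small sketch of that round trip, assuming only headers already included in this module:

    #include "absl/types/variant.h"
    #include "modules/rtp_rtcp/source/rtp_video_header.h"

    void FillAndReadVp8(webrtc::RTPVideoHeader& header) {
      header.codec = webrtc::kVideoCodecVP8;
      // Write: emplace the VP8-specific struct into the variant.
      auto& vp8 =
          header.video_type_header.emplace<webrtc::RTPVideoHeaderVP8>();
      vp8.temporalIdx = 1;
      vp8.layerSync = true;
      // Read back, as CopyCodecSpecific does:
      const auto& read =
          absl::get<webrtc::RTPVideoHeaderVP8>(header.video_type_header);
      (void)read.layerSync;
    }
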
diff --git a/third_party/libwebrtc/modules/video_coding/encoded_frame.h b/third_party/libwebrtc/modules/video_coding/encoded_frame.h
new file mode 100644
index 0000000000..9cc769277d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/encoded_frame.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#define MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+
+#include <vector>
+
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+class RTC_EXPORT VCMEncodedFrame : public EncodedImage {
+ public:
+ VCMEncodedFrame();
+ VCMEncodedFrame(const VCMEncodedFrame&);
+
+ ~VCMEncodedFrame();
+ /**
+ * Set render time in milliseconds
+ */
+ void SetRenderTime(const int64_t renderTimeMs) {
+ _renderTimeMs = renderTimeMs;
+ }
+
+ VideoPlayoutDelay PlayoutDelay() const { return playout_delay_; }
+
+ void SetPlayoutDelay(VideoPlayoutDelay playout_delay) {
+ playout_delay_ = playout_delay;
+ }
+
+ /**
+ * Get the encoded image
+ */
+ const webrtc::EncodedImage& EncodedImage() const {
+ return static_cast<const webrtc::EncodedImage&>(*this);
+ }
+
+ using EncodedImage::ColorSpace;
+ using EncodedImage::data;
+ using EncodedImage::GetEncodedData;
+ using EncodedImage::NtpTimeMs;
+ using EncodedImage::PacketInfos;
+ using EncodedImage::set_size;
+ using EncodedImage::SetColorSpace;
+ using EncodedImage::SetEncodedData;
+ using EncodedImage::SetPacketInfos;
+ using EncodedImage::SetSpatialIndex;
+ using EncodedImage::SetSpatialLayerFrameSize;
+ using EncodedImage::SetTimestamp;
+ using EncodedImage::size;
+ using EncodedImage::SpatialIndex;
+ using EncodedImage::SpatialLayerFrameSize;
+ using EncodedImage::Timestamp;
+
+ /**
+ * Get render time in milliseconds
+ */
+ int64_t RenderTimeMs() const { return _renderTimeMs; }
+ /**
+ * Get frame type
+ */
+ webrtc::VideoFrameType FrameType() const { return _frameType; }
+ /**
+ * Set frame type
+ */
+ void SetFrameType(webrtc::VideoFrameType frame_type) {
+ _frameType = frame_type;
+ }
+ /**
+ * Get frame rotation
+ */
+ VideoRotation rotation() const { return rotation_; }
+ /**
+ * Get video content type
+ */
+ VideoContentType contentType() const { return content_type_; }
+ /**
+ * Get video timing
+ */
+ EncodedImage::Timing video_timing() const { return timing_; }
+ EncodedImage::Timing* video_timing_mutable() { return &timing_; }
+ /**
+ * True if there's a frame missing before this frame
+ */
+ bool MissingFrame() const { return _missingFrame; }
+ /**
+ * Payload type of the encoded payload
+ */
+ uint8_t PayloadType() const { return _payloadType; }
+ /**
+ * Get codec specific info.
+ * The returned pointer is only valid as long as the VCMEncodedFrame
+ * is valid. Also, VCMEncodedFrame owns the pointer and will delete
+ * the object.
+ */
+ const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
+ void SetCodecSpecific(const CodecSpecificInfo* codec_specific) {
+ _codecSpecificInfo = *codec_specific;
+ }
+
+ protected:
+ void Reset();
+
+ void CopyCodecSpecific(const RTPVideoHeader* header);
+
+ int64_t _renderTimeMs;
+ uint8_t _payloadType;
+ bool _missingFrame;
+ CodecSpecificInfo _codecSpecificInfo;
+ webrtc::VideoCodecType _codec;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_ENCODED_FRAME_H_
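
A short usage sketch for the interface above; VCMEncodedFrame re-exports the EncodedImage setters, so they are called directly on the frame:

    #include "modules/video_coding/encoded_frame.h"

    void FrameSketch() {
      webrtc::VCMEncodedFrame frame;
      frame.SetTimestamp(90000);  // RTP timestamp units
      frame.SetRenderTime(/*renderTimeMs=*/1234);
      frame.SetFrameType(webrtc::VideoFrameType::kVideoFrameKey);
      // Upcast view of the same object:
      const webrtc::EncodedImage& image = frame.EncodedImage();
      (void)image;
      // frame.RenderTimeMs() now returns 1234. Reset() and
      // CopyCodecSpecific() are protected, for derived frame types.
    }
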
diff --git a/third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build b/third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build
new file mode 100644
index 0000000000..159b77d10c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/encoded_frame_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/encoded_frame.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("encoded_frame_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/event_wrapper.cc b/third_party/libwebrtc/modules/video_coding/event_wrapper.cc
new file mode 100644
index 0000000000..748c92f637
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/event_wrapper.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/event_wrapper.h"
+
+#include "rtc_base/event.h"
+
+namespace webrtc {
+
+class EventWrapperImpl : public EventWrapper {
+ public:
+ ~EventWrapperImpl() override {}
+
+ bool Set() override {
+ event_.Set();
+ return true;
+ }
+
+ // TODO(bugs.webrtc.org/14366): Migrate to TimeDelta.
+ EventTypeWrapper Wait(int max_time_ms) override {
+ return event_.Wait(TimeDelta::Millis(max_time_ms)) ? kEventSignaled
+ : kEventTimeout;
+ }
+
+ private:
+ rtc::Event event_;
+};
+
+// static
+EventWrapper* EventWrapper::Create() {
+ return new EventWrapperImpl();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/event_wrapper.h b/third_party/libwebrtc/modules/video_coding/event_wrapper.h
new file mode 100644
index 0000000000..c5e5661282
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/event_wrapper.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_EVENT_WRAPPER_H_
+#define MODULES_VIDEO_CODING_EVENT_WRAPPER_H_
+
+namespace webrtc {
+enum EventTypeWrapper { kEventSignaled = 1, kEventTimeout = 2 };
+
+class EventWrapper {
+ public:
+ // Factory method. Constructor disabled.
+ static EventWrapper* Create();
+
+ virtual ~EventWrapper() {}
+
+ // Releases threads that are calling Wait() and have started waiting. Note
+ // that a thread calling Wait() will not start waiting immediately;
+ // assumptions to the contrary are a very common source of issues in
+ // multithreaded programming.
+ // Set() is sticky in the sense that it will release at least one thread,
+ // either immediately or some time in the future.
+ virtual bool Set() = 0;
+
+ // Puts the calling thread into a wait state. The thread may be released
+ // by a Set() call, depending on whether other threads are waiting and, if
+ // so, on timing. The thread that was released will reset the event before
+ // leaving, preventing more threads from being released. If multiple threads
+ // are waiting for the same Set(), only one (random) thread is guaranteed to
+ // be released; it is possible that multiple (random) threads are released,
+ // depending on timing.
+ //
+ // `max_time_ms` is the maximum time to wait in milliseconds.
+ // TODO(bugs.webrtc.org/14366): Migrate to TimeDelta.
+ virtual EventTypeWrapper Wait(int max_time_ms) = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_EVENT_WRAPPER_H_
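
A minimal usage sketch for the wrapper declared above; Create() hands back a raw pointer, so ownership is taken with a unique_ptr here:

    #include <memory>

    #include "modules/video_coding/event_wrapper.h"

    void WaitUpTo100Ms() {
      std::unique_ptr<webrtc::EventWrapper> event(
          webrtc::EventWrapper::Create());
      // Some other thread calls event->Set() to release the waiter.
      if (event->Wait(/*max_time_ms=*/100) == webrtc::kEventSignaled) {
        // Released by a Set() before the timeout.
      } else {
        // kEventTimeout: 100 ms elapsed without a Set().
      }
    }
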
diff --git a/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc b/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc
new file mode 100644
index 0000000000..f204b01c7c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/fec_controller_default.h" // NOLINT
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <string>
+
+#include "modules/include/module_fec_types.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+const float kProtectionOverheadRateThreshold = 0.5;
+
+FecControllerDefault::FecControllerDefault(
+ Clock* clock,
+ VCMProtectionCallback* protection_callback)
+ : clock_(clock),
+ protection_callback_(protection_callback),
+ loss_prot_logic_(new media_optimization::VCMLossProtectionLogic(
+ clock_->TimeInMilliseconds())),
+ max_payload_size_(1460),
+ overhead_threshold_(GetProtectionOverheadRateThreshold()) {}
+
+FecControllerDefault::FecControllerDefault(Clock* clock)
+ : clock_(clock),
+ loss_prot_logic_(new media_optimization::VCMLossProtectionLogic(
+ clock_->TimeInMilliseconds())),
+ max_payload_size_(1460),
+ overhead_threshold_(GetProtectionOverheadRateThreshold()) {}
+
+FecControllerDefault::~FecControllerDefault(void) {
+ loss_prot_logic_->Release();
+}
+
+void FecControllerDefault::SetProtectionCallback(
+ VCMProtectionCallback* protection_callback) {
+ protection_callback_ = protection_callback;
+}
+
+void FecControllerDefault::SetEncodingData(size_t width,
+ size_t height,
+ size_t num_temporal_layers,
+ size_t max_payload_size) {
+ MutexLock lock(&mutex_);
+ loss_prot_logic_->UpdateFrameSize(width, height);
+ loss_prot_logic_->UpdateNumLayers(num_temporal_layers);
+ max_payload_size_ = max_payload_size;
+}
+
+float FecControllerDefault::GetProtectionOverheadRateThreshold() {
+ float overhead_threshold =
+ strtof(webrtc::field_trial::FindFullName(
+ "WebRTC-ProtectionOverheadRateThreshold")
+ .c_str(),
+ nullptr);
+ if (overhead_threshold > 0 && overhead_threshold <= 1) {
+ RTC_LOG(LS_INFO) << "ProtectionOverheadRateThreshold is set to "
+ << overhead_threshold;
+ return overhead_threshold;
+ } else if (overhead_threshold < 0 || overhead_threshold > 1) {
+ RTC_LOG(LS_WARNING)
+ << "ProtectionOverheadRateThreshold field trial is set to "
+ "an invalid value, expecting a value between (0, 1].";
+ }
+ // The WebRTC-ProtectionOverheadRateThreshold field trial string was not
+ // found or holds an invalid value; use the default value.
+ return kProtectionOverheadRateThreshold;
+}
+
+uint32_t FecControllerDefault::UpdateFecRates(
+ uint32_t estimated_bitrate_bps,
+ int actual_framerate_fps,
+ uint8_t fraction_lost,
+ std::vector<bool> loss_mask_vector,
+ int64_t round_trip_time_ms) {
+ float target_bitrate_kbps =
+ static_cast<float>(estimated_bitrate_bps) / 1000.0f;
+ // Sanity check.
+ if (actual_framerate_fps < 1.0) {
+ actual_framerate_fps = 1.0;
+ }
+ FecProtectionParams delta_fec_params;
+ FecProtectionParams key_fec_params;
+ {
+ MutexLock lock(&mutex_);
+ loss_prot_logic_->UpdateBitRate(target_bitrate_kbps);
+ loss_prot_logic_->UpdateRtt(round_trip_time_ms);
+ // Update frame rate for the loss protection logic class: frame rate should
+ // be the actual/sent rate.
+ loss_prot_logic_->UpdateFrameRate(actual_framerate_fps);
+ // Returns the filtered packet loss, used for the protection setting.
+ // The filtered loss may be the received loss (no filter), or some
+ // filtered value (average or max window filter).
+ // Use max window filter for now.
+ media_optimization::FilterPacketLossMode filter_mode =
+ media_optimization::kMaxFilter;
+ uint8_t packet_loss_enc = loss_prot_logic_->FilteredLoss(
+ clock_->TimeInMilliseconds(), filter_mode, fraction_lost);
+ // For now use the filtered loss for computing the robustness settings.
+ loss_prot_logic_->UpdateFilteredLossPr(packet_loss_enc);
+ if (loss_prot_logic_->SelectedType() == media_optimization::kNone) {
+ return estimated_bitrate_bps;
+ }
+ // The Update method will compute the robustness settings for the given
+ // protection method and the overhead cost; the protection method is set
+ // by the user via SetVideoProtection.
+ loss_prot_logic_->UpdateMethod();
+ // Get the bit cost of the protection method, based on the amount of
+ // overhead data actually transmitted (including headers) during the last
+ // second.
+ // Get the FEC code rate for Key frames (set to 0 when NA).
+ key_fec_params.fec_rate =
+ loss_prot_logic_->SelectedMethod()->RequiredProtectionFactorK();
+ // Get the FEC code rate for Delta frames (set to 0 when NA).
+ delta_fec_params.fec_rate =
+ loss_prot_logic_->SelectedMethod()->RequiredProtectionFactorD();
+ // The RTP module currently requires the same `max_fec_frames` for both
+ // key and delta frames.
+ delta_fec_params.max_fec_frames =
+ loss_prot_logic_->SelectedMethod()->MaxFramesFec();
+ key_fec_params.max_fec_frames =
+ loss_prot_logic_->SelectedMethod()->MaxFramesFec();
+ }
+ // Set the FEC packet mask type. `kFecMaskBursty` is more effective for
+ // consecutive losses and little/no packet re-ordering. As we currently
+ // do not have feedback data on the degree of correlated losses and packet
+ // re-ordering, we keep the default setting of `kFecMaskRandom` for now.
+ delta_fec_params.fec_mask_type = kFecMaskRandom;
+ key_fec_params.fec_mask_type = kFecMaskRandom;
+ // Update protection callback with protection settings.
+ uint32_t sent_video_rate_bps = 0;
+ uint32_t sent_nack_rate_bps = 0;
+ uint32_t sent_fec_rate_bps = 0;
+ // Rate cost of the protection methods.
+ float protection_overhead_rate = 0.0f;
+ // TODO(Marco): Pass FEC protection values per layer.
+ protection_callback_->ProtectionRequest(
+ &delta_fec_params, &key_fec_params, &sent_video_rate_bps,
+ &sent_nack_rate_bps, &sent_fec_rate_bps);
+ uint32_t sent_total_rate_bps =
+ sent_video_rate_bps + sent_nack_rate_bps + sent_fec_rate_bps;
+ // Estimate the overhead cost of the next second as staying the same
+ // relative to the source bitrate.
+ if (sent_total_rate_bps > 0) {
+ protection_overhead_rate =
+ static_cast<float>(sent_nack_rate_bps + sent_fec_rate_bps) /
+ sent_total_rate_bps;
+ }
+ // Cap the overhead estimate to a threshold; the default is 50%.
+ protection_overhead_rate =
+ std::min(protection_overhead_rate, overhead_threshold_);
+ // Source coding rate: total rate - protection overhead.
+ return estimated_bitrate_bps * (1.0 - protection_overhead_rate);
+}
+
+void FecControllerDefault::SetProtectionMethod(bool enable_fec,
+ bool enable_nack) {
+ media_optimization::VCMProtectionMethodEnum method(media_optimization::kNone);
+ if (enable_fec && enable_nack) {
+ method = media_optimization::kNackFec;
+ } else if (enable_nack) {
+ method = media_optimization::kNack;
+ } else if (enable_fec) {
+ method = media_optimization::kFec;
+ }
+ MutexLock lock(&mutex_);
+ loss_prot_logic_->SetMethod(method);
+}
+
+void FecControllerDefault::UpdateWithEncodedData(
+ const size_t encoded_image_length,
+ const VideoFrameType encoded_image_frametype) {
+ const size_t encoded_length = encoded_image_length;
+ MutexLock lock(&mutex_);
+ if (encoded_length > 0) {
+ const bool delta_frame =
+ encoded_image_frametype != VideoFrameType::kVideoFrameKey;
+ if (max_payload_size_ > 0 && encoded_length > 0) {
+ const float min_packets_per_frame =
+ encoded_length / static_cast<float>(max_payload_size_);
+ if (delta_frame) {
+ loss_prot_logic_->UpdatePacketsPerFrame(min_packets_per_frame,
+ clock_->TimeInMilliseconds());
+ } else {
+ loss_prot_logic_->UpdatePacketsPerFrameKey(
+ min_packets_per_frame, clock_->TimeInMilliseconds());
+ }
+ }
+ if (!delta_frame && encoded_length > 0) {
+ loss_prot_logic_->UpdateKeyFrameSize(static_cast<float>(encoded_length));
+ }
+ }
+}
+
+bool FecControllerDefault::UseLossVectorMask() {
+ return false;
+}
+
+} // namespace webrtc
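
To make the tail of UpdateFecRates concrete: the protection overhead is the NACK-plus-FEC share of the total sent rate, capped at overhead_threshold_ (0.5 by default), and the bitrate estimate is scaled by what remains. A worked sketch with made-up rates:

    #include <algorithm>
    #include <cstdint>

    uint32_t SourceBitrate(uint32_t estimated_bitrate_bps) {
      const uint32_t sent_video_bps = 90000;  // example values only
      const uint32_t sent_nack_bps = 5000;
      const uint32_t sent_fec_bps = 25000;
      const uint32_t total_bps = sent_video_bps + sent_nack_bps + sent_fec_bps;
      // 30000 / 120000 = 0.25 overhead, below the 0.5 cap.
      float overhead =
          static_cast<float>(sent_nack_bps + sent_fec_bps) / total_bps;
      overhead = std::min(overhead, 0.5f);
      // 75% of the estimate is left for source coding.
      return static_cast<uint32_t>(estimated_bitrate_bps * (1.0f - overhead));
    }

This is also why the unit tests that follow expect exactly half the max bitrate once the FEC or NACK rate equals the codec bitrate: the overhead share reaches the 50% cap.
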
diff --git a/third_party/libwebrtc/modules/video_coding/fec_controller_default.h b/third_party/libwebrtc/modules/video_coding/fec_controller_default.h
new file mode 100644
index 0000000000..a97dea011b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_controller_default.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FEC_CONTROLLER_DEFAULT_H_
+#define MODULES_VIDEO_CODING_FEC_CONTROLLER_DEFAULT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/fec_controller.h"
+#include "modules/video_coding/media_opt_util.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class FecControllerDefault : public FecController {
+ public:
+ FecControllerDefault(Clock* clock,
+ VCMProtectionCallback* protection_callback);
+ explicit FecControllerDefault(Clock* clock);
+ ~FecControllerDefault() override;
+
+ FecControllerDefault(const FecControllerDefault&) = delete;
+ FecControllerDefault& operator=(const FecControllerDefault&) = delete;
+
+ void SetProtectionCallback(
+ VCMProtectionCallback* protection_callback) override;
+ void SetProtectionMethod(bool enable_fec, bool enable_nack) override;
+ void SetEncodingData(size_t width,
+ size_t height,
+ size_t num_temporal_layers,
+ size_t max_payload_size) override;
+ uint32_t UpdateFecRates(uint32_t estimated_bitrate_bps,
+ int actual_framerate_fps,
+ uint8_t fraction_lost,
+ std::vector<bool> loss_mask_vector,
+ int64_t round_trip_time_ms) override;
+ void UpdateWithEncodedData(size_t encoded_image_length,
+ VideoFrameType encoded_image_frametype) override;
+ bool UseLossVectorMask() override;
+ float GetProtectionOverheadRateThreshold();
+
+ private:
+ enum { kBitrateAverageWinMs = 1000 };
+ Clock* const clock_;
+ VCMProtectionCallback* protection_callback_;
+ Mutex mutex_;
+ std::unique_ptr<media_optimization::VCMLossProtectionLogic> loss_prot_logic_
+ RTC_GUARDED_BY(mutex_);
+ size_t max_payload_size_ RTC_GUARDED_BY(mutex_);
+
+ const float overhead_threshold_;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_FEC_CONTROLLER_DEFAULT_H_
diff --git a/third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc b/third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc
new file mode 100644
index 0000000000..fda3d309a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_controller_unittest.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/fec_controller.h"
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "modules/include/module_fec_types.h"
+#include "modules/video_coding/fec_controller_default.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const int kCodecBitrateBps = 100000;
+
+class ProtectionBitrateCalculatorTest : public ::testing::Test {
+ protected:
+ enum {
+ kSampleRate = 90000 // RTP timestamps per second.
+ };
+
+ class ProtectionCallback : public VCMProtectionCallback {
+ public:
+ int ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) override {
+ *sent_video_rate_bps = kCodecBitrateBps;
+ *sent_nack_rate_bps = nack_rate_bps_;
+ *sent_fec_rate_bps = fec_rate_bps_;
+ return 0;
+ }
+
+ uint32_t fec_rate_bps_ = 0;
+ uint32_t nack_rate_bps_ = 0;
+ };
+
+ // Note: the simulated clock starts at 1 second, since parts of webrtc use 0 as
+ // a special case (e.g. frame rate in media optimization).
+ ProtectionBitrateCalculatorTest()
+ : clock_(1000), fec_controller_(&clock_, &protection_callback_) {}
+
+ SimulatedClock clock_;
+ ProtectionCallback protection_callback_;
+ FecControllerDefault fec_controller_;
+};
+
+TEST_F(ProtectionBitrateCalculatorTest, ProtectsUsingFecBitrate) {
+ static const uint32_t kMaxBitrateBps = 130000;
+
+ fec_controller_.SetProtectionMethod(true /*enable_fec*/,
+ false /* enable_nack */);
+ fec_controller_.SetEncodingData(640, 480, 1, 1000);
+
+ // Using 10% of codec bitrate for FEC.
+ protection_callback_.fec_rate_bps_ = kCodecBitrateBps / 10;
+ uint32_t target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 0, std::vector<bool>(1, false), 0);
+
+ EXPECT_GT(target_bitrate, 0u);
+ EXPECT_GT(kMaxBitrateBps, target_bitrate);
+
+ // With the FEC rate as high as the codec bitrate, the new target rate should
+ // be shared equally by both, but only be half of the max (since that ceiling
+ // should be hit).
+ protection_callback_.fec_rate_bps_ = kCodecBitrateBps;
+ target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 128, std::vector<bool>(1, false), 100);
+ EXPECT_EQ(kMaxBitrateBps / 2, target_bitrate);
+}
+
+TEST_F(ProtectionBitrateCalculatorTest, ProtectsUsingNackBitrate) {
+ static const uint32_t kMaxBitrateBps = 130000;
+
+ fec_controller_.SetProtectionMethod(false /*enable_fec*/,
+ true /* enable_nack */);
+ fec_controller_.SetEncodingData(640, 480, 1, 1000);
+
+ uint32_t target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 0, std::vector<bool>(1, false), 0);
+
+ EXPECT_EQ(kMaxBitrateBps, target_bitrate);
+
+ // With the NACK rate as high as the codec bitrate, the new target rate should
+ // be shared equally by both, but only be half of the max (since that ceiling
+ // should be hit).
+ protection_callback_.nack_rate_bps_ = kMaxBitrateBps;
+ target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 128, std::vector<bool>(1, false), 100);
+ EXPECT_EQ(kMaxBitrateBps / 2, target_bitrate);
+}
+
+TEST_F(ProtectionBitrateCalculatorTest, NoProtection) {
+ static const uint32_t kMaxBitrateBps = 130000;
+
+ fec_controller_.SetProtectionMethod(false /*enable_fec*/,
+ false /* enable_nack */);
+ fec_controller_.SetEncodingData(640, 480, 1, 1000);
+
+ uint32_t target_bitrate = fec_controller_.UpdateFecRates(
+ kMaxBitrateBps, 30, 128, std::vector<bool>(1, false), 100);
+ EXPECT_EQ(kMaxBitrateBps, target_bitrate);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/fec_rate_table.h b/third_party/libwebrtc/modules/video_coding/fec_rate_table.h
new file mode 100644
index 0000000000..91ec0ce159
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/fec_rate_table.h
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FEC_RATE_TABLE_H_
+#define MODULES_VIDEO_CODING_FEC_RATE_TABLE_H_
+
+// This is a private header for media_opt_util.cc.
+// It should not be included by other files.
+
+namespace webrtc {
+
+// Table of protection factors (code rates) for delta frames, for the XOR FEC.
+// Input is the packet loss and an effective rate (bits/frame).
+// Output is the array entry kFecRateTable[k], where k = rate_i * 129 + loss_j;
+// loss_j = 0, 1, ..., 128, and rate_i varies over some range.
+// TODO(brandtr): Consider replacing this big static table with a closed-form
+// expression instead.
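+// For example, for rate index rate_i and loss index loss_j (0..128), the
+// delta-frame protection factor is kFecRateTable[rate_i * 129 + loss_j].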
+static const int kFecRateTableSize = 6450;
+static const unsigned char kFecRateTable[kFecRateTableSize] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 56, 56, 56,
+ 56, 56, 56, 56, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 6, 6, 6, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 44, 44, 44, 44, 44, 44, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 68, 68, 68, 68, 68, 68, 68, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 88, 88, 88, 88, 88, 88, 88, 88, 88, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 19, 19, 19,
+ 36, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 55, 55, 55, 55, 55, 55, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 75, 75, 80, 80, 80, 80, 80, 97, 97, 97, 97, 97, 97, 97, 97,
+ 97, 97, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116,
+ 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 0, 0, 0, 0, 0, 0, 0, 0, 4,
+ 16, 16, 16, 16, 16, 16, 30, 35, 35, 47, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 63, 63, 63, 63, 63, 63,
+ 77, 77, 77, 77, 77, 77, 77, 82, 82, 82, 82, 94, 94, 94, 94,
+ 94, 105, 105, 105, 105, 110, 110, 110, 110, 110, 110, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 115, 115, 115, 115, 115, 115, 115, 115, 115,
+ 0, 0, 0, 0, 0, 0, 0, 4, 14, 27, 27, 27, 27, 27, 31,
+ 41, 52, 52, 56, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 69, 69, 69, 69, 69, 69, 69, 69, 69, 79, 79, 79, 79, 83, 83,
+ 83, 94, 94, 94, 94, 106, 106, 106, 106, 106, 115, 115, 115, 115, 125,
+ 125, 125, 125, 125, 125, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 0, 3, 3,
+ 3, 17, 28, 38, 38, 38, 38, 38, 47, 51, 63, 63, 63, 72, 72,
+ 72, 72, 72, 72, 72, 76, 76, 76, 76, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 84, 84, 84, 84, 93, 93, 93, 105, 105, 105, 105, 114,
+ 114, 114, 114, 114, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 0, 0, 12, 12, 12, 35, 43, 47, 47, 47,
+ 47, 47, 58, 58, 66, 66, 66, 70, 70, 70, 70, 70, 73, 73, 82,
+ 82, 82, 86, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94,
+ 94, 105, 105, 105, 114, 114, 114, 114, 117, 117, 117, 117, 117, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0,
+ 0, 24, 24, 24, 49, 53, 53, 53, 53, 53, 53, 61, 61, 64, 64,
+ 64, 64, 70, 70, 70, 70, 78, 78, 88, 88, 88, 96, 106, 106, 106,
+ 106, 106, 106, 106, 106, 106, 106, 112, 112, 112, 120, 120, 120, 124, 124,
+ 124, 124, 124, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 0, 5, 36, 36, 36, 55, 55,
+ 55, 55, 55, 55, 55, 58, 58, 58, 58, 58, 64, 78, 78, 78, 78,
+ 87, 87, 94, 94, 94, 103, 110, 110, 110, 110, 110, 110, 110, 110, 116,
+ 116, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 0, 0, 18, 43, 43, 43, 53, 53, 53, 53, 53, 53, 53, 53,
+ 58, 58, 58, 58, 71, 87, 87, 87, 87, 94, 94, 97, 97, 97, 109,
+ 111, 111, 111, 111, 111, 111, 111, 111, 125, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 31, 46, 46,
+ 46, 48, 48, 48, 48, 48, 48, 48, 48, 66, 66, 66, 66, 80, 93,
+ 93, 93, 93, 95, 95, 95, 95, 100, 115, 115, 115, 115, 115, 115, 115,
+ 115, 115, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 4, 40, 45, 45, 45, 45, 45, 45, 45, 45,
+ 49, 49, 49, 74, 74, 74, 74, 86, 90, 90, 90, 90, 95, 95, 95,
+ 95, 106, 120, 120, 120, 120, 120, 120, 120, 120, 120, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 14,
+ 42, 42, 42, 42, 42, 42, 42, 42, 46, 56, 56, 56, 80, 80, 80,
+ 80, 84, 84, 84, 84, 88, 99, 99, 99, 99, 111, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 26, 40, 40, 40, 40, 40, 40,
+ 40, 40, 54, 66, 66, 66, 80, 80, 80, 80, 80, 80, 80, 84, 94,
+ 106, 106, 106, 106, 116, 120, 120, 120, 120, 120, 120, 120, 120, 124, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 3, 34, 38, 38, 38, 38, 38, 42, 42, 42, 63, 72, 72, 76,
+ 80, 80, 80, 80, 80, 80, 80, 89, 101, 114, 114, 114, 114, 118, 118,
+ 118, 118, 118, 118, 118, 118, 118, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 12, 36, 36, 36, 36,
+ 36, 36, 49, 49, 49, 69, 73, 76, 86, 86, 86, 86, 86, 86, 86,
+ 86, 97, 109, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 22, 34, 34, 34, 34, 38, 38, 57, 57, 57, 69,
+ 73, 82, 92, 92, 92, 92, 92, 92, 96, 96, 104, 117, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 29, 33,
+ 33, 33, 33, 44, 44, 62, 62, 62, 69, 77, 87, 95, 95, 95, 95,
+ 95, 95, 107, 107, 110, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 31, 31, 31, 31, 31, 51, 51, 62,
+ 65, 65, 73, 83, 91, 94, 94, 94, 94, 97, 97, 114, 114, 114, 122,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 29, 29, 29, 29, 29, 56, 56, 59, 70, 70, 79, 86, 89, 89,
+ 89, 89, 89, 100, 100, 116, 116, 116, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 28, 28, 28, 28, 28,
+ 57, 57, 57, 76, 76, 83, 86, 86, 86, 86, 86, 89, 104, 104, 114,
+ 114, 114, 124, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 27, 27, 27, 27, 30, 55, 55, 55, 80, 80, 83,
+ 86, 86, 86, 86, 86, 93, 108, 108, 111, 111, 111, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 26, 26,
+ 26, 26, 36, 53, 53, 53, 80, 80, 80, 90, 90, 90, 90, 90, 98,
+ 107, 107, 107, 107, 107, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 26, 26, 26, 28, 42, 52, 54, 54,
+ 78, 78, 78, 95, 95, 95, 97, 97, 104, 106, 106, 106, 106, 106, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 24, 24, 24, 33, 47, 49, 58, 58, 74, 74, 74, 97, 97, 97,
+ 106, 106, 108, 108, 108, 108, 108, 108, 124, 124, 124, 124, 124, 124, 124,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 24, 24, 24, 39, 48,
+ 50, 63, 63, 72, 74, 74, 96, 96, 96, 109, 111, 111, 111, 111, 111,
+ 111, 111, 119, 119, 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 23, 23, 23, 43, 46, 54, 66, 66, 69, 77, 77,
+ 92, 92, 92, 105, 113, 113, 113, 113, 113, 113, 113, 115, 117, 123, 123,
+ 123, 123, 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 22, 22,
+ 22, 44, 44, 59, 67, 67, 67, 81, 81, 89, 89, 89, 97, 112, 112,
+ 112, 112, 112, 112, 112, 112, 119, 126, 126, 126, 126, 126, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 21, 21, 24, 43, 45, 63, 65, 65,
+ 67, 85, 85, 87, 87, 87, 91, 109, 109, 109, 111, 111, 111, 111, 111,
+ 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 21, 21, 28, 42, 50, 63, 63, 66, 71, 85, 85, 85, 85, 87,
+ 92, 106, 106, 108, 114, 114, 114, 114, 114, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 20, 20, 34, 41, 54,
+ 62, 62, 69, 75, 82, 82, 82, 82, 92, 98, 105, 105, 110, 117, 117,
+ 117, 117, 117, 124, 124, 126, 126, 126, 126, 126, 126, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 20, 20, 38, 40, 58, 60, 60, 73, 78, 80, 80,
+ 80, 80, 100, 105, 107, 107, 113, 118, 118, 118, 118, 118, 120, 120, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 19, 21,
+ 38, 40, 58, 58, 60, 75, 77, 77, 77, 81, 81, 107, 109, 109, 109,
+ 114, 116, 116, 116, 116, 116, 116, 116, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 18, 25, 37, 44, 56, 56, 63, 75,
+ 75, 75, 75, 88, 88, 111, 111, 111, 111, 112, 112, 112, 112, 112, 112,
+ 112, 114, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 18, 30, 36, 48, 55, 55, 67, 73, 73, 73, 73, 97, 97, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 116, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 18, 34, 36, 52, 55,
+ 55, 70, 72, 73, 73, 73, 102, 104, 108, 108, 108, 108, 109, 109, 109,
+ 109, 109, 109, 109, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 17, 35, 35, 52, 59, 59, 70, 70, 76, 76, 76,
+ 99, 105, 105, 105, 105, 105, 111, 111, 111, 111, 111, 111, 111, 121, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 17, 34,
+ 36, 51, 61, 62, 70, 70, 80, 80, 80, 93, 103, 103, 103, 103, 103,
+ 112, 112, 112, 112, 112, 116, 118, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 16, 33, 39, 50, 59, 65, 72, 72,
+ 82, 82, 82, 91, 100, 100, 100, 100, 100, 109, 109, 109, 109, 109, 121,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 16, 32, 43, 48, 54, 66, 75, 75, 81, 83, 83, 92, 97, 97,
+ 97, 99, 99, 105, 105, 105, 105, 105, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 31, 46, 47, 49,
+ 69, 77, 77, 81, 85, 85, 93, 95, 95, 95, 100, 100, 102, 102, 102,
+ 102, 102, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 15, 30, 46, 48, 48, 70, 75, 79, 82, 87, 87,
+ 92, 94, 94, 94, 103, 103, 103, 103, 103, 104, 104, 115, 120, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 30,
+ 45, 50, 50, 68, 70, 80, 85, 89, 89, 90, 95, 95, 95, 104, 104,
+ 104, 104, 104, 109, 109, 112, 114, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 14, 29, 44, 54, 54, 64, 64, 83,
+ 87, 88, 88, 88, 98, 98, 98, 103, 103, 103, 103, 103, 113, 113, 113,
+ 113, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 14, 29, 43, 56, 56, 61, 61, 84, 85, 88, 88, 88, 100, 100,
+ 100, 102, 102, 102, 102, 102, 113, 116, 116, 116, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 14, 28, 42, 57, 57,
+ 62, 62, 80, 80, 91, 91, 91, 100, 100, 100, 100, 100, 100, 100, 100,
+ 109, 119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 14, 28, 42, 56, 56, 65, 66, 76, 76, 92, 92,
+ 92, 97, 97, 97, 101, 101, 101, 101, 101, 106, 121, 121, 121, 126, 126,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 13, 27,
+ 41, 55, 55, 67, 72, 74, 74, 90, 90, 90, 91, 91, 91, 105, 105,
+ 105, 105, 105, 107, 122, 122, 122, 123, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 13, 27, 40, 54, 54, 67, 76, 76,
+ 76, 85, 85, 85, 85, 85, 85, 112, 112, 112, 112, 112, 112, 121, 121,
+ 121, 121, 121, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FEC_RATE_TABLE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer.cc b/third_party/libwebrtc/modules/video_coding/frame_buffer.cc
new file mode 100644
index 0000000000..787da1e5a9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer.cc
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_buffer.h"
+
+#include <string.h>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/packet.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+VCMFrameBuffer::VCMFrameBuffer()
+ : _state(kStateEmpty), _nackCount(0), _latestPacketTimeMs(-1) {}
+
+VCMFrameBuffer::~VCMFrameBuffer() {}
+
+webrtc::VideoFrameType VCMFrameBuffer::FrameType() const {
+ return _sessionInfo.FrameType();
+}
+
+int32_t VCMFrameBuffer::GetLowSeqNum() const {
+ return _sessionInfo.LowSequenceNumber();
+}
+
+int32_t VCMFrameBuffer::GetHighSeqNum() const {
+ return _sessionInfo.HighSequenceNumber();
+}
+
+int VCMFrameBuffer::PictureId() const {
+ return _sessionInfo.PictureId();
+}
+
+int VCMFrameBuffer::TemporalId() const {
+ return _sessionInfo.TemporalId();
+}
+
+bool VCMFrameBuffer::LayerSync() const {
+ return _sessionInfo.LayerSync();
+}
+
+int VCMFrameBuffer::Tl0PicId() const {
+ return _sessionInfo.Tl0PicId();
+}
+
+std::vector<NaluInfo> VCMFrameBuffer::GetNaluInfos() const {
+ return _sessionInfo.GetNaluInfos();
+}
+
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::SetGofInfo");
+ _sessionInfo.SetGofInfo(gof_info, idx);
+ // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+}
+
+// Insert packet
+VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
+ int64_t timeInMs,
+ const FrameData& frame_data) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::InsertPacket");
+ RTC_DCHECK(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
+ if (packet.dataPtr != NULL) {
+ _payloadType = packet.payloadType;
+ }
+
+ if (kStateEmpty == _state) {
+ // First packet (empty and/or media) inserted into this frame.
+    // Store some info and set some initial values.
+ SetTimestamp(packet.timestamp);
+ // We only take the ntp timestamp of the first packet of a frame.
+ ntp_time_ms_ = packet.ntp_time_ms_;
+ _codec = packet.codec();
+ if (packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
+      // First media packet.
+ SetState(kStateIncomplete);
+ }
+ }
+
+ size_t oldSize = encoded_image_buffer_ ? encoded_image_buffer_->size() : 0;
+ uint32_t requiredSizeBytes =
+ size() + packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ if (requiredSizeBytes > oldSize) {
+ const uint8_t* prevBuffer = data();
+ const uint32_t increments =
+ requiredSizeBytes / kBufferIncStepSizeBytes +
+ (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
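+    // E.g. (sketch, assuming kBufferIncStepSizeBytes == 3000 for
+    // illustration): requiredSizeBytes == 7000 gives increments == 3, so the
+    // buffer grows by 3 * 3000 bytes on top of oldSize.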
+ const uint32_t newSize = oldSize + increments * kBufferIncStepSizeBytes;
+ if (newSize > kMaxJBFrameSizeBytes) {
+ RTC_LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+ "big.";
+ return kSizeError;
+ }
+ if (data() == nullptr) {
+ encoded_image_buffer_ = EncodedImageBuffer::Create(newSize);
+ SetEncodedData(encoded_image_buffer_);
+ set_size(0);
+ } else {
+ RTC_CHECK(encoded_image_buffer_ != nullptr);
+ RTC_DCHECK_EQ(encoded_image_buffer_->data(), data());
+ encoded_image_buffer_->Realloc(newSize);
+ }
+ _sessionInfo.UpdateDataPointers(prevBuffer, data());
+ }
+
+ if (packet.width() > 0 && packet.height() > 0) {
+ _encodedWidth = packet.width();
+ _encodedHeight = packet.height();
+ }
+
+  // Don't copy payload-specific data for empty packets (e.g. padding packets).
+ if (packet.sizeBytes > 0)
+ CopyCodecSpecific(&packet.video_header);
+
+ int retVal = _sessionInfo.InsertPacket(
+ packet, encoded_image_buffer_ ? encoded_image_buffer_->data() : nullptr,
+ frame_data);
+ if (retVal == -1) {
+ return kSizeError;
+ } else if (retVal == -2) {
+ return kDuplicatePacket;
+ } else if (retVal == -3) {
+ return kOutOfBoundsPacket;
+ }
+  // Update size.
+ set_size(size() + static_cast<uint32_t>(retVal));
+
+ _latestPacketTimeMs = timeInMs;
+
+ // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+ // ts_126114v120700p.pdf Section 7.4.5.
+ // The MTSI client shall add the payload bytes as defined in this clause
+ // onto the last RTP packet in each group of packets which make up a key
+ // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+ // (HEVC)).
+ if (packet.markerBit) {
+ rotation_ = packet.video_header.rotation;
+ content_type_ = packet.video_header.content_type;
+ if (packet.video_header.video_timing.flags != VideoSendTiming::kInvalid) {
+ timing_.encode_start_ms =
+ ntp_time_ms_ + packet.video_header.video_timing.encode_start_delta_ms;
+ timing_.encode_finish_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.encode_finish_delta_ms;
+ timing_.packetization_finish_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.packetization_finish_delta_ms;
+ timing_.pacer_exit_ms =
+ ntp_time_ms_ + packet.video_header.video_timing.pacer_exit_delta_ms;
+ timing_.network_timestamp_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.network_timestamp_delta_ms;
+ timing_.network2_timestamp_ms =
+ ntp_time_ms_ +
+ packet.video_header.video_timing.network2_timestamp_delta_ms;
+ }
+ timing_.flags = packet.video_header.video_timing.flags;
+ }
+
+ if (packet.is_first_packet_in_frame()) {
+ playout_delay_ = packet.video_header.playout_delay;
+ }
+
+ if (_sessionInfo.complete()) {
+ SetState(kStateComplete);
+ return kCompleteSession;
+ }
+ return kIncomplete;
+}
+
+int64_t VCMFrameBuffer::LatestPacketTimeMs() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::LatestPacketTimeMs");
+ return _latestPacketTimeMs;
+}
+
+void VCMFrameBuffer::IncrementNackCount() {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::IncrementNackCount");
+ _nackCount++;
+}
+
+int16_t VCMFrameBuffer::GetNackCount() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::GetNackCount");
+ return _nackCount;
+}
+
+bool VCMFrameBuffer::HaveFirstPacket() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::HaveFirstPacket");
+ return _sessionInfo.HaveFirstPacket();
+}
+
+int VCMFrameBuffer::NumPackets() const {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::NumPackets");
+ return _sessionInfo.NumPackets();
+}
+
+void VCMFrameBuffer::Reset() {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
+ set_size(0);
+ _sessionInfo.Reset();
+ _payloadType = 0;
+ _nackCount = 0;
+ _latestPacketTimeMs = -1;
+ _state = kStateEmpty;
+ VCMEncodedFrame::Reset();
+}
+
+// Set state of frame
+void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::SetState");
+ if (_state == state) {
+ return;
+ }
+ switch (state) {
+ case kStateIncomplete:
+      // We can only enter this state from kStateEmpty.
+ RTC_DCHECK_EQ(_state, kStateEmpty);
+
+ // Do nothing, we received a packet
+ break;
+
+ case kStateComplete:
+ RTC_DCHECK(_state == kStateEmpty || _state == kStateIncomplete);
+
+ break;
+
+ case kStateEmpty:
+ // Should only be set to empty through Reset().
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ _state = state;
+}
+
+// Get current state of frame
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
+ return _state;
+}
+
+void VCMFrameBuffer::PrepareForDecode(bool continuous) {
+ TRACE_EVENT0("webrtc", "VCMFrameBuffer::PrepareForDecode");
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ set_size(size() - bytes_removed);
+  // Transfer frame information to EncodedFrame and create any
+  // codec-specific information.
+ _frameType = _sessionInfo.FrameType();
+ _missingFrame = !continuous;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer.h b/third_party/libwebrtc/modules/video_coding/frame_buffer.h
new file mode 100644
index 0000000000..76df28e588
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+#define MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/session_info.h"
+
+namespace webrtc {
+
+class VCMFrameBuffer : public VCMEncodedFrame {
+ public:
+ VCMFrameBuffer();
+ virtual ~VCMFrameBuffer();
+
+ virtual void Reset();
+
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
+ int64_t timeInMs,
+ const FrameData& frame_data);
+
+ // State
+ // Get current state of frame
+ VCMFrameBufferStateEnum GetState() const;
+ void PrepareForDecode(bool continuous);
+
+ bool IsSessionComplete() const;
+ bool HaveFirstPacket() const;
+ int NumPackets() const;
+
+ // Sequence numbers
+ // Get lowest packet sequence number in frame
+ int32_t GetLowSeqNum() const;
+ // Get highest packet sequence number in frame
+ int32_t GetHighSeqNum() const;
+
+ int PictureId() const;
+ int TemporalId() const;
+ bool LayerSync() const;
+ int Tl0PicId() const;
+
+ std::vector<NaluInfo> GetNaluInfos() const;
+
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
+ // Increments a counter to keep track of the number of packets of this frame
+ // which were NACKed before they arrived.
+ void IncrementNackCount();
+ // Returns the number of packets of this frame which were NACKed before they
+ // arrived.
+ int16_t GetNackCount() const;
+
+ int64_t LatestPacketTimeMs() const;
+
+ webrtc::VideoFrameType FrameType() const;
+
+ private:
+ void SetState(VCMFrameBufferStateEnum state); // Set state of frame
+
+ VCMFrameBufferStateEnum _state; // Current state of the frame
+ // Set with SetEncodedData, but keep pointer to the concrete class here, to
+ // enable reallocation and mutation.
+ rtc::scoped_refptr<EncodedImageBuffer> encoded_image_buffer_;
+ VCMSessionInfo _sessionInfo;
+ uint16_t _nackCount;
+ int64_t _latestPacketTimeMs;
+};
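+
+// Usage sketch (illustrative): a jitter buffer feeds packets in until the
+// session is complete, then prepares the frame for decoding.
+//
+//   VCMFrameBuffer frame;
+//   if (frame.InsertPacket(packet, now_ms, frame_data) == kCompleteSession) {
+//     frame.PrepareForDecode(/*continuous=*/true);
+//   }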
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer2.cc b/third_party/libwebrtc/modules/video_coding/frame_buffer2.cc
new file mode 100644
index 0000000000..a70b143a29
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer2.cc
@@ -0,0 +1,625 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_buffer2.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <iterator>
+#include <memory>
+#include <queue>
+#include <utility>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/frame_helpers.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rtt_mult_experiment.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace video_coding {
+
+namespace {
+// Max number of frames the buffer will hold.
+constexpr size_t kMaxFramesBuffered = 800;
+
+// Default value for the maximum decode queue size that is used when the
+// low-latency renderer is used.
+constexpr size_t kZeroPlayoutDelayDefaultMaxDecodeQueueSize = 8;
+
+// Max number of decoded frame info that will be saved.
+constexpr int kMaxFramesHistory = 1 << 13;
+
+// The maximum time a frame is allowed to be late, relative to its predicted
+// render time, and still be rendered.
+constexpr int kMaxAllowedFrameDelayMs = 5;
+
+constexpr int64_t kLogNonDecodedIntervalMs = 5000;
+} // namespace
+
+FrameBuffer::FrameBuffer(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials)
+ : decoded_frames_history_(kMaxFramesHistory),
+ clock_(clock),
+ callback_queue_(nullptr),
+ jitter_estimator_(clock, field_trials),
+ timing_(timing),
+ stopped_(false),
+ protection_mode_(kProtectionNack),
+ last_log_non_decoded_ms_(-kLogNonDecodedIntervalMs),
+ rtt_mult_settings_(RttMultExperiment::GetRttMultValue()),
+ zero_playout_delay_max_decode_queue_size_(
+ "max_decode_queue_size",
+ kZeroPlayoutDelayDefaultMaxDecodeQueueSize) {
+ ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_},
+ field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
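+  // E.g. (sketch, assuming the usual key:value field-trial syntax): a trial
+  // string of "max_decode_queue_size:16" would raise the low-latency decode
+  // queue limit from the default of 8 to 16.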
+ callback_checker_.Detach();
+}
+
+FrameBuffer::~FrameBuffer() {
+ RTC_DCHECK_RUN_ON(&construction_checker_);
+}
+
+void FrameBuffer::NextFrame(int64_t max_wait_time_ms,
+ bool keyframe_required,
+ TaskQueueBase* callback_queue,
+ NextFrameCallback handler) {
+ RTC_DCHECK_RUN_ON(&callback_checker_);
+ RTC_DCHECK(callback_queue->IsCurrent());
+ TRACE_EVENT0("webrtc", "FrameBuffer::NextFrame");
+ int64_t latest_return_time_ms =
+ clock_->TimeInMilliseconds() + max_wait_time_ms;
+
+ MutexLock lock(&mutex_);
+ if (stopped_) {
+ return;
+ }
+ latest_return_time_ms_ = latest_return_time_ms;
+ keyframe_required_ = keyframe_required;
+ frame_handler_ = handler;
+ callback_queue_ = callback_queue;
+ StartWaitForNextFrameOnQueue();
+}
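+
+// Usage sketch (illustrative; `buffer` and `queue` are assumed to exist, and
+// `queue` must be the current task queue): the handler receives nullptr when
+// the wait times out.
+//
+//   buffer.NextFrame(/*max_wait_time_ms=*/3000, /*keyframe_required=*/false,
+//                    queue, [](std::unique_ptr<EncodedFrame> frame) {
+//                      if (!frame) { /* Timed out. */ }
+//                    });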
+
+void FrameBuffer::StartWaitForNextFrameOnQueue() {
+ RTC_DCHECK(callback_queue_);
+ RTC_DCHECK(!callback_task_.Running());
+ int64_t wait_ms = FindNextFrame(clock_->CurrentTime());
+ callback_task_ = RepeatingTaskHandle::DelayedStart(
+ callback_queue_, TimeDelta::Millis(wait_ms),
+ [this] {
+ RTC_DCHECK_RUN_ON(&callback_checker_);
+ // If this task has not been cancelled, we did not get any new frames
+ // while waiting. Continue with frame delivery.
+ std::unique_ptr<EncodedFrame> frame;
+ NextFrameCallback frame_handler;
+ {
+ MutexLock lock(&mutex_);
+ if (!frames_to_decode_.empty()) {
+ // We have frames, deliver!
+ frame = GetNextFrame();
+ timing_->SetLastDecodeScheduledTimestamp(clock_->CurrentTime());
+ } else if (clock_->TimeInMilliseconds() < latest_return_time_ms_) {
+ // If there's no frames to decode and there is still time left, it
+ // means that the frame buffer was cleared between creation and
+ // execution of this task. Continue waiting for the remaining time.
+ int64_t wait_ms = FindNextFrame(clock_->CurrentTime());
+ return TimeDelta::Millis(wait_ms);
+ }
+ frame_handler = std::move(frame_handler_);
+ CancelCallback();
+ }
+ // Deliver frame, if any. Otherwise signal timeout.
+ frame_handler(std::move(frame));
+ return TimeDelta::Zero(); // Ignored.
+ },
+ TaskQueueBase::DelayPrecision::kHigh);
+}
+
+int64_t FrameBuffer::FindNextFrame(Timestamp now) {
+ int64_t wait_ms = latest_return_time_ms_ - now.ms();
+ frames_to_decode_.clear();
+
+  // `last_continuous_frame_` may be empty below, but nullopt is smaller than
+  // everything else, so the loop will immediately terminate as expected.
+ for (auto frame_it = frames_.begin();
+ frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
+ ++frame_it) {
+ if (!frame_it->second.continuous ||
+ frame_it->second.num_missing_decodable > 0) {
+ continue;
+ }
+
+ EncodedFrame* frame = frame_it->second.frame.get();
+
+ if (keyframe_required_ && !frame->is_keyframe())
+ continue;
+
+ auto last_decoded_frame_timestamp =
+ decoded_frames_history_.GetLastDecodedFrameTimestamp();
+
+ // TODO(https://bugs.webrtc.org/9974): consider removing this check
+ // as it may make a stream undecodable after a very long delay between
+ // frames.
+ if (last_decoded_frame_timestamp &&
+ AheadOf(*last_decoded_frame_timestamp, frame->Timestamp())) {
+ continue;
+ }
+
+ // Gather all remaining frames for the same superframe.
+ std::vector<FrameMap::iterator> current_superframe;
+ current_superframe.push_back(frame_it);
+ bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer;
+ FrameMap::iterator next_frame_it = frame_it;
+ while (!last_layer_completed) {
+ ++next_frame_it;
+
+ if (next_frame_it == frames_.end() || !next_frame_it->second.frame) {
+ break;
+ }
+
+ if (next_frame_it->second.frame->Timestamp() != frame->Timestamp() ||
+ !next_frame_it->second.continuous) {
+ break;
+ }
+
+ if (next_frame_it->second.num_missing_decodable > 0) {
+ bool has_inter_layer_dependency = false;
+ for (size_t i = 0; i < EncodedFrame::kMaxFrameReferences &&
+ i < next_frame_it->second.frame->num_references;
+ ++i) {
+ if (next_frame_it->second.frame->references[i] >= frame_it->first) {
+ has_inter_layer_dependency = true;
+ break;
+ }
+ }
+
+ // If the frame has an undecoded dependency that is not within the same
+ // temporal unit then this frame is not yet ready to be decoded. If it
+ // is within the same temporal unit then the not yet decoded dependency
+ // is just a lower spatial frame, which is ok.
+ if (!has_inter_layer_dependency ||
+ next_frame_it->second.num_missing_decodable > 1) {
+ break;
+ }
+ }
+
+ current_superframe.push_back(next_frame_it);
+ last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer;
+ }
+ // Check if the current superframe is complete.
+    // TODO(bugs.webrtc.org/10064): consider returning all available-to-decode
+    // frames even if the superframe is not yet complete.
+ if (!last_layer_completed) {
+ continue;
+ }
+
+ frames_to_decode_ = std::move(current_superframe);
+
+ absl::optional<Timestamp> render_time = frame->RenderTimestamp();
+ if (!render_time) {
+ render_time = timing_->RenderTime(frame->Timestamp(), now);
+ frame->SetRenderTime(render_time->ms());
+ }
+    const bool too_many_frames_queued =
+        frames_.size() > zero_playout_delay_max_decode_queue_size_;
+ wait_ms =
+ timing_->MaxWaitingTime(*render_time, now, too_many_frames_queued).ms();
+
+ // This will cause the frame buffer to prefer high framerate rather
+ // than high resolution in the case of the decoder not decoding fast
+ // enough and the stream has multiple spatial and temporal layers.
+ // For multiple temporal layers it may cause non-base layer frames to be
+ // skipped if they are late.
+ if (wait_ms < -kMaxAllowedFrameDelayMs)
+ continue;
+
+ break;
+ }
+ wait_ms = std::min<int64_t>(wait_ms, latest_return_time_ms_ - now.ms());
+ wait_ms = std::max<int64_t>(wait_ms, 0);
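+  // E.g. (sketch): a render-based wait of 80 ms with only 50 ms left until
+  // latest_return_time_ms_ is clamped to 50 ms, and a negative wait becomes 0
+  // so the callback task fires immediately.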
+ return wait_ms;
+}
+
+std::unique_ptr<EncodedFrame> FrameBuffer::GetNextFrame() {
+ RTC_DCHECK_RUN_ON(&callback_checker_);
+ Timestamp now = clock_->CurrentTime();
+  // TODO(ilnik): remove `frames_out`; use frames_to_decode_ directly.
+ std::vector<std::unique_ptr<EncodedFrame>> frames_out;
+
+ RTC_DCHECK(!frames_to_decode_.empty());
+ bool superframe_delayed_by_retransmission = false;
+ DataSize superframe_size = DataSize::Zero();
+ const EncodedFrame& first_frame = *frames_to_decode_[0]->second.frame;
+ absl::optional<Timestamp> render_time = first_frame.RenderTimestamp();
+ int64_t receive_time_ms = first_frame.ReceivedTime();
+ // Gracefully handle bad RTP timestamps and render time issues.
+ if (!render_time || FrameHasBadRenderTiming(*render_time, now) ||
+ TargetVideoDelayIsTooLarge(timing_->TargetVideoDelay())) {
+ RTC_LOG(LS_WARNING) << "Resetting jitter estimator and timing module due "
+ "to bad render timing for rtp_timestamp="
+ << first_frame.Timestamp();
+ jitter_estimator_.Reset();
+ timing_->Reset();
+ render_time = timing_->RenderTime(first_frame.Timestamp(), now);
+ }
+
+ for (FrameMap::iterator& frame_it : frames_to_decode_) {
+ RTC_DCHECK(frame_it != frames_.end());
+ std::unique_ptr<EncodedFrame> frame = std::move(frame_it->second.frame);
+
+ frame->SetRenderTime(render_time->ms());
+
+ superframe_delayed_by_retransmission |= frame->delayed_by_retransmission();
+ receive_time_ms = std::max(receive_time_ms, frame->ReceivedTime());
+ superframe_size += DataSize::Bytes(frame->size());
+
+ PropagateDecodability(frame_it->second);
+ decoded_frames_history_.InsertDecoded(frame_it->first, frame->Timestamp());
+
+ frames_.erase(frames_.begin(), ++frame_it);
+
+ frames_out.emplace_back(std::move(frame));
+ }
+
+ if (!superframe_delayed_by_retransmission) {
+ auto frame_delay = inter_frame_delay_.CalculateDelay(
+ first_frame.Timestamp(), Timestamp::Millis(receive_time_ms));
+
+ if (frame_delay) {
+ jitter_estimator_.UpdateEstimate(*frame_delay, superframe_size);
+ }
+
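+    // With NACK+FEC the RTT term is excluded from the jitter estimate
+    // (rtt_mult == 0.0); with plain NACK it is fully applied (rtt_mult ==
+    // 1.0), unless the RttMultExperiment settings below override it.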
+ float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
+ absl::optional<TimeDelta> rtt_mult_add_cap_ms = absl::nullopt;
+ if (rtt_mult_settings_.has_value()) {
+ rtt_mult = rtt_mult_settings_->rtt_mult_setting;
+ rtt_mult_add_cap_ms =
+ TimeDelta::Millis(rtt_mult_settings_->rtt_mult_add_cap_ms);
+ }
+ timing_->SetJitterDelay(
+ jitter_estimator_.GetJitterEstimate(rtt_mult, rtt_mult_add_cap_ms));
+ timing_->UpdateCurrentDelay(*render_time, now);
+ } else {
+ if (RttMultExperiment::RttMultEnabled())
+ jitter_estimator_.FrameNacked();
+ }
+
+ if (frames_out.size() == 1) {
+ return std::move(frames_out[0]);
+ } else {
+ return CombineAndDeleteFrames(std::move(frames_out));
+ }
+}
+
+void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::SetProtectionMode");
+ MutexLock lock(&mutex_);
+ protection_mode_ = mode;
+}
+
+void FrameBuffer::Stop() {
+ TRACE_EVENT0("webrtc", "FrameBuffer::Stop");
+ MutexLock lock(&mutex_);
+ if (stopped_)
+ return;
+ stopped_ = true;
+
+ CancelCallback();
+}
+
+void FrameBuffer::Clear() {
+ MutexLock lock(&mutex_);
+ ClearFramesAndHistory();
+}
+
+int FrameBuffer::Size() {
+ MutexLock lock(&mutex_);
+ return frames_.size();
+}
+
+void FrameBuffer::UpdateRtt(int64_t rtt_ms) {
+ MutexLock lock(&mutex_);
+ jitter_estimator_.UpdateRtt(TimeDelta::Millis(rtt_ms));
+}
+
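+// A frame's references are valid only if every reference precedes the frame's
+// own id and none is repeated. E.g. (sketch): Id() == 10 with references
+// {7, 9} is valid; {10} (self-reference) or {7, 7} (duplicate) is not.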
+bool FrameBuffer::ValidReferences(const EncodedFrame& frame) const {
+ for (size_t i = 0; i < frame.num_references; ++i) {
+ if (frame.references[i] >= frame.Id())
+ return false;
+
+ for (size_t j = i + 1; j < frame.num_references; ++j) {
+ if (frame.references[i] == frame.references[j])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void FrameBuffer::CancelCallback() {
+ // Called from the callback queue or from within Stop().
+ frame_handler_ = {};
+ callback_task_.Stop();
+ callback_queue_ = nullptr;
+ callback_checker_.Detach();
+}
+
+int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::InsertFrame");
+ RTC_DCHECK(frame);
+
+ MutexLock lock(&mutex_);
+
+ const auto& pis = frame->PacketInfos();
+ int64_t last_continuous_frame_id = last_continuous_frame_.value_or(-1);
+
+ if (!ValidReferences(*frame)) {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frame dropped (Invalid references)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
+ << " has invalid frame references, dropping frame.";
+ return last_continuous_frame_id;
+ }
+
+ if (frames_.size() >= kMaxFramesBuffered) {
+ if (frame->is_keyframe()) {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frames dropped (KF + Full buffer)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Inserting keyframe " << frame->Id()
+ << " but buffer is full, clearing"
+ " buffer and inserting the frame.";
+ ClearFramesAndHistory();
+ } else {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frame dropped (Full buffer)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
+ << " could not be inserted due to the frame "
+ "buffer being full, dropping frame.";
+ return last_continuous_frame_id;
+ }
+ }
+
+ auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
+ auto last_decoded_frame_timestamp =
+ decoded_frames_history_.GetLastDecodedFrameTimestamp();
+ if (last_decoded_frame && frame->Id() <= *last_decoded_frame) {
+ if (AheadOf(frame->Timestamp(), *last_decoded_frame_timestamp) &&
+ frame->is_keyframe()) {
+      // If this frame has a newer timestamp but an earlier frame id, then we
+      // assume there has been a jump in the frame id due to an encoder
+      // reconfiguration or some other reason. Even though this is not
+      // according to spec, we can still continue to decode from this frame if
+      // it is a keyframe.
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frames dropped (OOO + PicId jump)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING)
+ << "A jump in frame id was detected, clearing buffer.";
+ ClearFramesAndHistory();
+ last_continuous_frame_id = -1;
+ } else {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frame dropped (Out of order)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "Frame " << frame->Id() << " inserted after frame "
+ << *last_decoded_frame
+ << " was handed off for decoding, dropping frame.";
+ return last_continuous_frame_id;
+ }
+ }
+
+ // Test if inserting this frame would cause the order of the frames to become
+ // ambiguous (covering more than half the interval of 2^16). This can happen
+  // when the frame id makes large jumps mid-stream.
+ if (!frames_.empty() && frame->Id() < frames_.begin()->first &&
+ frames_.rbegin()->first < frame->Id()) {
+ TRACE_EVENT2("webrtc",
+ "FrameBuffer::InsertFrame Frames dropped (PicId big-jump)",
+ "remote_ssrc", pis.empty() ? 0 : pis[0].ssrc(), "picture_id",
+ frame->Id());
+ RTC_LOG(LS_WARNING) << "A jump in frame id was detected, clearing buffer.";
+ ClearFramesAndHistory();
+ last_continuous_frame_id = -1;
+ }
+
+ auto info = frames_.emplace(frame->Id(), FrameInfo()).first;
+
+ if (info->second.frame) {
+ return last_continuous_frame_id;
+ }
+
+ if (!UpdateFrameInfoWithIncomingFrame(*frame, info))
+ return last_continuous_frame_id;
+
+ // If ReceivedTime() is negative, then it is not a valid timestamp.
+ if (!frame->delayed_by_retransmission() && frame->ReceivedTime() >= 0)
+ timing_->IncomingTimestamp(frame->Timestamp(),
+ Timestamp::Millis(frame->ReceivedTime()));
+
+ // It can happen that a frame will be reported as fully received even if a
+ // lower spatial layer frame is missing.
+ info->second.frame = std::move(frame);
+
+ if (info->second.num_missing_continuous == 0) {
+ info->second.continuous = true;
+ PropagateContinuity(info);
+ last_continuous_frame_id = *last_continuous_frame_;
+
+ // Since we now have new continuous frames, there might be a better frame
+ // to return from NextFrame.
+ if (callback_queue_) {
+ callback_queue_->PostTask([this] {
+ MutexLock lock(&mutex_);
+ if (!callback_task_.Running())
+ return;
+ RTC_CHECK(frame_handler_);
+ callback_task_.Stop();
+ StartWaitForNextFrameOnQueue();
+ });
+ }
+ }
+
+ return last_continuous_frame_id;
+}
+
+void FrameBuffer::PropagateContinuity(FrameMap::iterator start) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::PropagateContinuity");
+ RTC_DCHECK(start->second.continuous);
+
+ std::queue<FrameMap::iterator> continuous_frames;
+ continuous_frames.push(start);
+
+ // A simple BFS to traverse continuous frames.
+ while (!continuous_frames.empty()) {
+ auto frame = continuous_frames.front();
+ continuous_frames.pop();
+
+ if (!last_continuous_frame_ || *last_continuous_frame_ < frame->first) {
+ last_continuous_frame_ = frame->first;
+ }
+
+ // Loop through all dependent frames; if a dependent frame no longer has
+ // any unfulfilled dependencies, then that frame is continuous as well.
+ for (size_t d = 0; d < frame->second.dependent_frames.size(); ++d) {
+ auto frame_ref = frames_.find(frame->second.dependent_frames[d]);
+ RTC_DCHECK(frame_ref != frames_.end());
+
+ // TODO(philipel): Look into why we've seen this happen.
+ if (frame_ref != frames_.end()) {
+ --frame_ref->second.num_missing_continuous;
+ if (frame_ref->second.num_missing_continuous == 0) {
+ frame_ref->second.continuous = true;
+ continuous_frames.push(frame_ref);
+ }
+ }
+ }
+ }
+}
+
+void FrameBuffer::PropagateDecodability(const FrameInfo& info) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::PropagateDecodability");
+ for (size_t d = 0; d < info.dependent_frames.size(); ++d) {
+ auto ref_info = frames_.find(info.dependent_frames[d]);
+ RTC_DCHECK(ref_info != frames_.end());
+ // TODO(philipel): Look into why we've seen this happen.
+ if (ref_info != frames_.end()) {
+ RTC_DCHECK_GT(ref_info->second.num_missing_decodable, 0U);
+ --ref_info->second.num_missing_decodable;
+ }
+ }
+}
+
+bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
+ FrameMap::iterator info) {
+ TRACE_EVENT0("webrtc", "FrameBuffer::UpdateFrameInfoWithIncomingFrame");
+ auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
+ RTC_DCHECK(!last_decoded_frame || *last_decoded_frame < info->first);
+
+ // In this function we determine how many missing dependencies this `frame`
+ // has to become continuous/decodable. If a frame that this `frame` depends
+ // on has already been decoded, then we can ignore that dependency since it
+ // has already been fulfilled.
+ //
+ // For all other frames we will register a backwards reference to this `frame`
+ // so that `num_missing_continuous` and `num_missing_decodable` can be
+ // decremented as frames become continuous/are decoded.
+ struct Dependency {
+ int64_t frame_id;
+ bool continuous;
+ };
+ std::vector<Dependency> not_yet_fulfilled_dependencies;
+
+ // Find all dependencies that have not yet been fulfilled.
+ for (size_t i = 0; i < frame.num_references; ++i) {
+ // Does `frame` depend on a frame earlier than the last decoded one?
+ if (last_decoded_frame && frame.references[i] <= *last_decoded_frame) {
+ // Was that frame decoded? If not, this `frame` will never become
+ // decodable.
+ if (!decoded_frames_history_.WasDecoded(frame.references[i])) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (last_log_non_decoded_ms_ + kLogNonDecodedIntervalMs < now_ms) {
+ RTC_LOG(LS_WARNING)
+ << "Frame " << frame.Id()
+ << " depends on a non-decoded frame more previous than the last "
+ "decoded frame, dropping frame.";
+ last_log_non_decoded_ms_ = now_ms;
+ }
+ return false;
+ }
+ } else {
+ auto ref_info = frames_.find(frame.references[i]);
+ bool ref_continuous =
+ ref_info != frames_.end() && ref_info->second.continuous;
+ not_yet_fulfilled_dependencies.push_back(
+ {frame.references[i], ref_continuous});
+ }
+ }
+
+ info->second.num_missing_continuous = not_yet_fulfilled_dependencies.size();
+ info->second.num_missing_decodable = not_yet_fulfilled_dependencies.size();
+
+ for (const Dependency& dep : not_yet_fulfilled_dependencies) {
+ if (dep.continuous)
+ --info->second.num_missing_continuous;
+
+ frames_[dep.frame_id].dependent_frames.push_back(frame.Id());
+ }
+
+ return true;
+}
+
+void FrameBuffer::ClearFramesAndHistory() {
+ TRACE_EVENT0("webrtc", "FrameBuffer::ClearFramesAndHistory");
+ frames_.clear();
+ last_continuous_frame_.reset();
+ frames_to_decode_.clear();
+ decoded_frames_history_.Clear();
+}
+
+// TODO(philipel): Avoid the concatenation of frames here, by replacing
+// NextFrame and GetNextFrame with methods returning multiple frames.
+std::unique_ptr<EncodedFrame> FrameBuffer::CombineAndDeleteFrames(
+ std::vector<std::unique_ptr<EncodedFrame>> frames) const {
+ RTC_DCHECK(!frames.empty());
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> inlined;
+ for (auto& frame : frames) {
+ inlined.push_back(std::move(frame));
+ }
+ return webrtc::CombineAndDeleteFrames(std::move(inlined));
+}
+
+FrameBuffer::FrameInfo::FrameInfo() = default;
+FrameBuffer::FrameInfo::FrameInfo(FrameInfo&&) = default;
+FrameBuffer::FrameInfo::~FrameInfo() = default;
+
+} // namespace video_coding
+} // namespace webrtc
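
A note on the contract above, since InsertFrame's return value is easy to
misread: it is the id of the last *continuous* frame, which can jump forward
when a late frame fills a gap and PropagateContinuity walks its dependents. A
minimal sketch, assuming a hypothetical test helper `MakeFrame(id, refs)` that
builds an EncodedFrame (the unit tests further down show the real setup):

    // Sketch only; `MakeFrame` is an assumed helper, not part of this patch.
    void ContinuityExample(webrtc::video_coding::FrameBuffer& buffer) {
      buffer.InsertFrame(MakeFrame(/*id=*/1, /*refs=*/{}));   // Returns 1.
      // Frame 3 depends on the still-missing frame 2, so the last
      // continuous frame id is unchanged.
      buffer.InsertFrame(MakeFrame(/*id=*/3, /*refs=*/{2}));  // Returns 1.
      // Frame 2 fills the gap; continuity propagates through frame 3.
      int64_t last = buffer.InsertFrame(MakeFrame(/*id=*/2, /*refs=*/{1}));
      // last == 3.
    }
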
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer2.h b/third_party/libwebrtc/modules/video_coding/frame_buffer2.h
new file mode 100644
index 0000000000..1383c40ae3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer2.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
+#define MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/encoded_frame.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/utility/decoded_frames_history.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/rtt_mult_experiment.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+class VCMReceiveStatisticsCallback;
+class JitterEstimator;
+class VCMTiming;
+
+namespace video_coding {
+
+class FrameBuffer {
+ public:
+ FrameBuffer(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials);
+
+ FrameBuffer() = delete;
+ FrameBuffer(const FrameBuffer&) = delete;
+ FrameBuffer& operator=(const FrameBuffer&) = delete;
+
+ virtual ~FrameBuffer();
+
+ // Insert a frame into the frame buffer. Returns the picture id
+ // of the last continuous frame or -1 if there is no continuous frame.
+ int64_t InsertFrame(std::unique_ptr<EncodedFrame> frame);
+
+ using NextFrameCallback = std::function<void(std::unique_ptr<EncodedFrame>)>;
+ // Get the next frame for decoding. `handler` is invoked with the next frame
+ // or with nullptr if no frame is ready for decoding after `max_wait_time_ms`.
+ void NextFrame(int64_t max_wait_time_ms,
+ bool keyframe_required,
+ TaskQueueBase* callback_queue,
+ NextFrameCallback handler);
+
+ // Tells the FrameBuffer which protection mode is in use. Affects
+ // the frame timing.
+ // TODO(philipel): Remove this when the new timing calculations have been
+ // implemented.
+ void SetProtectionMode(VCMVideoProtection mode);
+
+ // Stop the frame buffer, causing any sleeping thread in NextFrame to
+ // return immediately.
+ void Stop();
+
+ // Updates the RTT for jitter buffer estimation.
+ void UpdateRtt(int64_t rtt_ms);
+
+ // Clears the FrameBuffer, removing all the buffered frames.
+ void Clear();
+
+ int Size();
+
+ private:
+ struct FrameInfo {
+ FrameInfo();
+ FrameInfo(FrameInfo&&);
+ ~FrameInfo();
+
+ // Which other frames have direct unfulfilled dependencies
+ // on this frame.
+ absl::InlinedVector<int64_t, 8> dependent_frames;
+
+ // A frame is continuous if it has all its referenced/indirectly
+ // referenced frames.
+ //
+ // How many unfulfilled frames this frame has until it becomes continuous.
+ size_t num_missing_continuous = 0;
+
+ // A frame is decodable if all its referenced frames have been decoded.
+ //
+ // How many unfulfilled frames this frame has until it becomes decodable.
+ size_t num_missing_decodable = 0;
+
+ // If this frame is continuous or not.
+ bool continuous = false;
+
+ // The actual EncodedFrame.
+ std::unique_ptr<EncodedFrame> frame;
+ };
+
+ using FrameMap = std::map<int64_t, FrameInfo>;
+
+ // Check that the references of `frame` are valid.
+ bool ValidReferences(const EncodedFrame& frame) const;
+
+ int64_t FindNextFrame(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ std::unique_ptr<EncodedFrame> GetNextFrame()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void StartWaitForNextFrameOnQueue() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void CancelCallback() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Update all directly dependent and indirectly dependent frames and mark
+ // them as continuous if all their references have been fulfilled.
+ void PropagateContinuity(FrameMap::iterator start)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Marks the frame as decoded and updates all directly dependent frames.
+ void PropagateDecodability(const FrameInfo& info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Update the corresponding FrameInfo of `frame` and all FrameInfos that
+ // `frame` references.
+ // Return false if `frame` will never be decodable, true otherwise.
+ bool UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
+ FrameMap::iterator info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void ClearFramesAndHistory() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // The cleaner solution would be to have the NextFrame function return a
+ // vector of frames, but until the decoding pipeline can support decoding
+ // multiple frames at the same time we combine all frames into one frame
+ // and return it. See bugs.webrtc.org/10064.
+ std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+ std::vector<std::unique_ptr<EncodedFrame>> frames) const;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker construction_checker_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker callback_checker_;
+
+ // Stores only undecoded frames.
+ FrameMap frames_ RTC_GUARDED_BY(mutex_);
+ DecodedFramesHistory decoded_frames_history_ RTC_GUARDED_BY(mutex_);
+
+ Mutex mutex_;
+ Clock* const clock_;
+
+ TaskQueueBase* callback_queue_ RTC_GUARDED_BY(mutex_);
+ RepeatingTaskHandle callback_task_ RTC_GUARDED_BY(mutex_);
+ NextFrameCallback frame_handler_ RTC_GUARDED_BY(mutex_);
+ int64_t latest_return_time_ms_ RTC_GUARDED_BY(mutex_);
+ bool keyframe_required_ RTC_GUARDED_BY(mutex_);
+
+ JitterEstimator jitter_estimator_ RTC_GUARDED_BY(mutex_);
+ VCMTiming* const timing_ RTC_GUARDED_BY(mutex_);
+ InterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int64_t> last_continuous_frame_ RTC_GUARDED_BY(mutex_);
+ std::vector<FrameMap::iterator> frames_to_decode_ RTC_GUARDED_BY(mutex_);
+ bool stopped_ RTC_GUARDED_BY(mutex_);
+ VCMVideoProtection protection_mode_ RTC_GUARDED_BY(mutex_);
+ int64_t last_log_non_decoded_ms_ RTC_GUARDED_BY(mutex_);
+
+ // rtt_mult experiment settings.
+ const absl::optional<RttMultExperiment::Settings> rtt_mult_settings_;
+
+ // Maximum number of frames in the decode queue to allow pacing. If the
+ // queue grows beyond the max limit, pacing will be disabled and frames will
+ // be pushed to the decoder as soon as possible. This only has an effect
+ // when the low-latency rendering path is active, which is indicated by
+ // the frame's render time == 0.
+ FieldTrialParameter<unsigned> zero_playout_delay_max_decode_queue_size_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
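
For context, a minimal sketch of how a receive path might drive this interface
(the decode-queue wiring and the `OnDecodableFrame` hook are illustrative
assumptions, not part of this patch):

    #include "modules/video_coding/frame_buffer2.h"

    void RequestFrame(webrtc::video_coding::FrameBuffer& buffer,
                      webrtc::TaskQueueBase* decode_queue) {
      // Wait up to 200 ms for a decodable frame; the handler is invoked on
      // `decode_queue`, with nullptr on timeout.
      buffer.NextFrame(
          /*max_wait_time_ms=*/200, /*keyframe_required=*/false, decode_queue,
          [](std::unique_ptr<webrtc::EncodedFrame> frame) {
            if (!frame) {
              return;  // Timed out; callers typically just ask again.
            }
            OnDecodableFrame(std::move(frame));  // Assumed decode hook.
          });
    }
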
diff --git a/third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc b/third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc
new file mode 100644
index 0000000000..0fabd9b496
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_buffer2_unittest.cc
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_buffer2.h"
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/clock.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using ::testing::_;
+using ::testing::IsEmpty;
+using ::testing::Return;
+using ::testing::SizeIs;
+
+namespace webrtc {
+namespace video_coding {
+
+class VCMTimingFake : public VCMTiming {
+ public:
+ explicit VCMTimingFake(Clock* clock, const FieldTrialsView& field_trials)
+ : VCMTiming(clock, field_trials) {}
+
+ Timestamp RenderTime(uint32_t frame_timestamp, Timestamp now) const override {
+ if (last_render_time_.IsMinusInfinity()) {
+ last_render_time_ = now + kDelay;
+ last_timestamp_ = frame_timestamp;
+ }
+
+ auto diff = MinDiff(frame_timestamp, last_timestamp_);
+ auto time_diff = TimeDelta::Millis(diff / 90);
+ if (AheadOf(frame_timestamp, last_timestamp_))
+ last_render_time_ += time_diff;
+ else
+ last_render_time_ -= time_diff;
+
+ last_timestamp_ = frame_timestamp;
+ return last_render_time_;
+ }
+
+ TimeDelta MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const override {
+ return render_time - now - kDecodeTime;
+ }
+
+ TimeDelta GetCurrentJitter() {
+ return VCMTiming::GetTimings().jitter_buffer_delay;
+ }
+
+ private:
+ static constexpr TimeDelta kDelay = TimeDelta::Millis(50);
+ const TimeDelta kDecodeTime = kDelay / 2;
+ mutable uint32_t last_timestamp_ = 0;
+ mutable Timestamp last_render_time_ = Timestamp::MinusInfinity();
+};
+
+class FrameObjectFake : public EncodedFrame {
+ public:
+ int64_t ReceivedTime() const override { return 0; }
+
+ int64_t RenderTime() const override { return _renderTimeMs; }
+
+ bool delayed_by_retransmission() const override {
+ return delayed_by_retransmission_;
+ }
+ void set_delayed_by_retransmission(bool delayed) {
+ delayed_by_retransmission_ = delayed;
+ }
+
+ private:
+ bool delayed_by_retransmission_ = false;
+};
+
+class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback {
+ public:
+ MOCK_METHOD(void,
+ OnCompleteFrame,
+ (bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type),
+ (override));
+ MOCK_METHOD(void, OnDroppedFrames, (uint32_t frames_dropped), (override));
+ MOCK_METHOD(void,
+ OnFrameBufferTimingsUpdated,
+ (int max_decode,
+ int current_delay,
+ int target_delay,
+ int jitter_buffer,
+ int min_playout_delay,
+ int render_delay),
+ (override));
+ MOCK_METHOD(void,
+ OnTimingFrameInfoUpdated,
+ (const TimingFrameInfo& info),
+ (override));
+};
+
+class TestFrameBuffer2 : public ::testing::Test {
+ protected:
+ static constexpr int kMaxReferences = 5;
+ static constexpr int kFps1 = 1000;
+ static constexpr int kFps10 = kFps1 / 10;
+ static constexpr int kFps20 = kFps1 / 20;
+ static constexpr size_t kFrameSize = 10;
+
+ TestFrameBuffer2()
+ : time_controller_(Timestamp::Seconds(0)),
+ time_task_queue_(
+ time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "extract queue",
+ TaskQueueFactory::Priority::NORMAL)),
+ timing_(time_controller_.GetClock(), field_trials_),
+ buffer_(new FrameBuffer(time_controller_.GetClock(),
+ &timing_,
+ field_trials_)),
+ rand_(0x34678213) {}
+
+ template <typename... T>
+ std::unique_ptr<FrameObjectFake> CreateFrame(uint16_t picture_id,
+ uint8_t spatial_layer,
+ int64_t ts_ms,
+ bool last_spatial_layer,
+ size_t frame_size_bytes,
+ T... refs) {
+ static_assert(sizeof...(refs) <= kMaxReferences,
+ "To many references specified for EncodedFrame.");
+ std::array<uint16_t, sizeof...(refs)> references = {
+ {rtc::checked_cast<uint16_t>(refs)...}};
+
+ auto frame = std::make_unique<FrameObjectFake>();
+ frame->SetId(picture_id);
+ frame->SetSpatialIndex(spatial_layer);
+ frame->SetTimestamp(ts_ms * 90);
+ frame->num_references = references.size();
+ frame->is_last_spatial_layer = last_spatial_layer;
+ // Add some data to buffer.
+ frame->SetEncodedData(EncodedImageBuffer::Create(frame_size_bytes));
+ for (size_t r = 0; r < references.size(); ++r)
+ frame->references[r] = references[r];
+ return frame;
+ }
+
+ template <typename... T>
+ int InsertFrame(uint16_t picture_id,
+ uint8_t spatial_layer,
+ int64_t ts_ms,
+ bool last_spatial_layer,
+ size_t frame_size_bytes,
+ T... refs) {
+ return buffer_->InsertFrame(CreateFrame(picture_id, spatial_layer, ts_ms,
+ last_spatial_layer,
+ frame_size_bytes, refs...));
+ }
+
+ int InsertNackedFrame(uint16_t picture_id, int64_t ts_ms) {
+ std::unique_ptr<FrameObjectFake> frame =
+ CreateFrame(picture_id, 0, ts_ms, true, kFrameSize);
+ frame->set_delayed_by_retransmission(true);
+ return buffer_->InsertFrame(std::move(frame));
+ }
+
+ void ExtractFrame(int64_t max_wait_time = 0, bool keyframe_required = false) {
+ time_task_queue_->PostTask([this, max_wait_time, keyframe_required]() {
+ buffer_->NextFrame(max_wait_time, keyframe_required,
+ time_task_queue_.get(),
+ [this](std::unique_ptr<EncodedFrame> frame) {
+ frames_.emplace_back(std::move(frame));
+ });
+ });
+ if (max_wait_time == 0) {
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+ }
+
+ void CheckFrame(size_t index, int picture_id, int spatial_layer) {
+ ASSERT_LT(index, frames_.size());
+ ASSERT_TRUE(frames_[index]);
+ ASSERT_EQ(picture_id, frames_[index]->Id());
+ ASSERT_EQ(spatial_layer, frames_[index]->SpatialIndex().value_or(0));
+ }
+
+ void CheckFrameSize(size_t index, size_t size) {
+ ASSERT_LT(index, frames_.size());
+ ASSERT_TRUE(frames_[index]);
+ ASSERT_EQ(frames_[index]->size(), size);
+ }
+
+ void CheckNoFrame(size_t index) {
+ ASSERT_LT(index, frames_.size());
+ ASSERT_FALSE(frames_[index]);
+ }
+
+ uint32_t Rand() { return rand_.Rand<uint32_t>(); }
+
+ test::ScopedKeyValueConfig field_trials_;
+ webrtc::GlobalSimulatedTimeController time_controller_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> time_task_queue_;
+ VCMTimingFake timing_;
+ std::unique_ptr<FrameBuffer> buffer_;
+ std::vector<std::unique_ptr<EncodedFrame>> frames_;
+ Random rand_;
+};
+
+// From https://en.cppreference.com/w/cpp/language/static: "If ... a constexpr
+// static data member (since C++11) is odr-used, a definition at namespace scope
+// is still required... This definition is deprecated for constexpr data members
+// since C++17."
+// kFrameSize is odr-used since it is passed by reference to EXPECT_EQ().
+#if __cplusplus < 201703L
+constexpr size_t TestFrameBuffer2::kFrameSize;
+#endif
+
+TEST_F(TestFrameBuffer2, WaitForFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ ExtractFrame(50);
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ time_controller_.AdvanceTime(TimeDelta::Millis(50));
+ CheckFrame(0, pid, 0);
+}
+
+TEST_F(TestFrameBuffer2, ClearWhileWaitingForFrame) {
+ const uint16_t pid = Rand();
+
+ // Insert a frame and wait for it for max 100ms.
+ InsertFrame(pid, 0, 25, true, kFrameSize);
+ ExtractFrame(100);
+ // After 10ms, clear the buffer.
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ buffer_->Clear();
+ // Confirm that the frame was not sent for rendering.
+ time_controller_.AdvanceTime(TimeDelta::Millis(15));
+ EXPECT_THAT(frames_, IsEmpty());
+
+ // We are still waiting for a frame, since 100ms has not passed. Insert a new
+ // frame. This new frame should be the one that is returned as the old frame
+ // was cleared.
+ const uint16_t new_pid = pid + 1;
+ InsertFrame(new_pid, 0, 50, true, kFrameSize);
+ time_controller_.AdvanceTime(TimeDelta::Millis(25));
+ ASSERT_THAT(frames_, SizeIs(1));
+ CheckFrame(0, new_pid, 0);
+}
+
+TEST_F(TestFrameBuffer2, OneSuperFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ InsertFrame(pid + 1, 1, ts, true, kFrameSize);
+ ExtractFrame();
+
+ CheckFrame(0, pid, 1);
+}
+
+TEST_F(TestFrameBuffer2, ZeroPlayoutDelay) {
+ test::ScopedKeyValueConfig field_trials;
+ VCMTiming timing(time_controller_.GetClock(), field_trials);
+ buffer_ = std::make_unique<FrameBuffer>(time_controller_.GetClock(), &timing,
+ field_trials);
+ const VideoPlayoutDelay kPlayoutDelayMs = {0, 0};
+ std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
+ test_frame->SetId(0);
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+ buffer_->InsertFrame(std::move(test_frame));
+ ExtractFrame(0, false);
+ CheckFrame(0, 0, 0);
+ EXPECT_EQ(0, frames_[0]->RenderTimeMs());
+}
+
+// Flaky test, see bugs.webrtc.org/7068.
+TEST_F(TestFrameBuffer2, DISABLED_OneUnorderedSuperFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ ExtractFrame(50);
+ InsertFrame(pid, 1, ts, true, kFrameSize);
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid, 1);
+}
+
+TEST_F(TestFrameBuffer2, DISABLED_OneLayerStreamReordered) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ CheckFrame(0, pid, 0);
+ for (int i = 1; i < 10; i += 2) {
+ ExtractFrame(50);
+ InsertFrame(pid + i + 1, 0, ts + (i + 1) * kFps10, true, kFrameSize,
+ pid + i);
+ time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
+ InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
+ time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
+ ExtractFrame();
+ CheckFrame(i, pid + i, 0);
+ CheckFrame(i + 1, pid + i + 1, 0);
+ }
+}
+
+TEST_F(TestFrameBuffer2, ExtractFromEmptyBuffer) {
+ ExtractFrame();
+ CheckNoFrame(0);
+}
+
+TEST_F(TestFrameBuffer2, MissingFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid);
+ InsertFrame(pid + 3, 0, ts, true, kFrameSize, pid + 1, pid + 2);
+ ExtractFrame();
+ ExtractFrame();
+ ExtractFrame();
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid + 2, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, OneLayerStream) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ CheckFrame(0, pid, 0);
+ for (int i = 1; i < 10; ++i) {
+ InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
+ ExtractFrame();
+ time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
+ CheckFrame(i, pid + i, 0);
+ }
+}
+
+TEST_F(TestFrameBuffer2, DropTemporalLayerSlowDecoder) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ InsertFrame(pid + 1, 0, ts + kFps20, true, kFrameSize, pid);
+ for (int i = 2; i < 10; i += 2) {
+ uint32_t ts_tl0 = ts + i / 2 * kFps10;
+ InsertFrame(pid + i, 0, ts_tl0, true, kFrameSize, pid + i - 2);
+ InsertFrame(pid + i + 1, 0, ts_tl0 + kFps20, true, kFrameSize, pid + i,
+ pid + i - 1);
+ }
+
+ for (int i = 0; i < 10; ++i) {
+ ExtractFrame();
+ time_controller_.AdvanceTime(TimeDelta::Millis(70));
+ }
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid + 1, 0);
+ CheckFrame(2, pid + 2, 0);
+ CheckFrame(3, pid + 4, 0);
+ CheckFrame(4, pid + 6, 0);
+ CheckFrame(5, pid + 8, 0);
+ CheckNoFrame(6);
+ CheckNoFrame(7);
+ CheckNoFrame(8);
+ CheckNoFrame(9);
+}
+
+TEST_F(TestFrameBuffer2, DropFramesIfSystemIsStalled) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ InsertFrame(pid + 1, 0, ts + 1 * kFps10, true, kFrameSize, pid);
+ InsertFrame(pid + 2, 0, ts + 2 * kFps10, true, kFrameSize, pid + 1);
+ InsertFrame(pid + 3, 0, ts + 3 * kFps10, true, kFrameSize);
+
+ ExtractFrame();
+ // Jump forward in time, simulating the system being stalled for some reason.
+ time_controller_.AdvanceTime(TimeDelta::Millis(3) * kFps10);
+ // Extract one more frame, expect second and third frame to be dropped.
+ ExtractFrame();
+
+ CheckFrame(0, pid + 0, 0);
+ CheckFrame(1, pid + 3, 0);
+}
+
+TEST_F(TestFrameBuffer2, DroppedFramesCountedOnClear) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ for (int i = 1; i < 5; ++i) {
+ InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
+ }
+
+ // All frames should be dropped when Clear is called.
+ buffer_->Clear();
+}
+
+TEST_F(TestFrameBuffer2, InsertLateFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ InsertFrame(pid + 2, 0, ts, true, kFrameSize);
+ ExtractFrame();
+ InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid);
+ ExtractFrame();
+
+ CheckFrame(0, pid, 0);
+ CheckFrame(1, pid + 2, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, ProtectionModeNackFEC) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+ constexpr int64_t kRttMs = 200;
+ buffer_->UpdateRtt(kRttMs);
+
+ // Jitter estimate unaffected by RTT in this protection mode.
+ buffer_->SetProtectionMode(kProtectionNackFEC);
+ InsertNackedFrame(pid, ts);
+ InsertNackedFrame(pid + 1, ts + 100);
+ InsertNackedFrame(pid + 2, ts + 200);
+ InsertFrame(pid + 3, 0, ts + 300, true, kFrameSize);
+ ExtractFrame();
+ ExtractFrame();
+ ExtractFrame();
+ ExtractFrame();
+ ASSERT_EQ(4u, frames_.size());
+ EXPECT_LT(timing_.GetCurrentJitter().ms(), kRttMs);
+}
+
+TEST_F(TestFrameBuffer2, NoContinuousFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(-1, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid));
+}
+
+TEST_F(TestFrameBuffer2, LastContinuousFrameSingleLayer) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize));
+ EXPECT_EQ(pid, InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid + 1));
+ EXPECT_EQ(pid + 2, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid));
+ EXPECT_EQ(pid + 2, InsertFrame(pid + 4, 0, ts, true, kFrameSize, pid + 3));
+ EXPECT_EQ(pid + 5, InsertFrame(pid + 5, 0, ts, true, kFrameSize));
+}
+
+TEST_F(TestFrameBuffer2, LastContinuousFrameTwoLayers) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, kFrameSize));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 1, ts, true, kFrameSize));
+ EXPECT_EQ(pid + 1,
+ InsertFrame(pid + 3, 1, ts, true, kFrameSize, pid + 1, pid + 2));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 4, 0, ts, false, kFrameSize, pid + 2));
+ EXPECT_EQ(pid + 1,
+ InsertFrame(pid + 5, 1, ts, true, kFrameSize, pid + 3, pid + 4));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 6, 0, ts, false, kFrameSize, pid + 4));
+ EXPECT_EQ(pid + 6, InsertFrame(pid + 2, 0, ts, false, kFrameSize, pid));
+ EXPECT_EQ(pid + 7,
+ InsertFrame(pid + 7, 1, ts, true, kFrameSize, pid + 5, pid + 6));
+}
+
+TEST_F(TestFrameBuffer2, PictureIdJumpBack) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize));
+ EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 0, ts + 1, true, kFrameSize, pid));
+ ExtractFrame();
+ CheckFrame(0, pid, 0);
+
+ // Jump back in pid but increase ts.
+ EXPECT_EQ(pid - 1, InsertFrame(pid - 1, 0, ts + 2, true, kFrameSize));
+ ExtractFrame();
+ ExtractFrame();
+ CheckFrame(1, pid - 1, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, ForwardJumps) {
+ EXPECT_EQ(5453, InsertFrame(5453, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(5454, InsertFrame(5454, 0, 1, true, kFrameSize, 5453));
+ ExtractFrame();
+ EXPECT_EQ(15670, InsertFrame(15670, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(29804, InsertFrame(29804, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(29805, InsertFrame(29805, 0, 1, true, kFrameSize, 29804));
+ ExtractFrame();
+ EXPECT_EQ(29806, InsertFrame(29806, 0, 1, true, kFrameSize, 29805));
+ ExtractFrame();
+ EXPECT_EQ(33819, InsertFrame(33819, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(41248, InsertFrame(41248, 0, 1, true, kFrameSize));
+ ExtractFrame();
+}
+
+TEST_F(TestFrameBuffer2, DuplicateFrames) {
+ EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize));
+}
+
+// TODO(philipel): implement more unittests related to invalid references.
+TEST_F(TestFrameBuffer2, InvalidReferences) {
+ EXPECT_EQ(-1, InsertFrame(0, 0, 1000, true, kFrameSize, 2));
+ EXPECT_EQ(1, InsertFrame(1, 0, 2000, true, kFrameSize));
+ ExtractFrame();
+ EXPECT_EQ(2, InsertFrame(2, 0, 3000, true, kFrameSize, 1));
+}
+
+TEST_F(TestFrameBuffer2, KeyframeRequired) {
+ EXPECT_EQ(1, InsertFrame(1, 0, 1000, true, kFrameSize));
+ EXPECT_EQ(2, InsertFrame(2, 0, 2000, true, kFrameSize, 1));
+ EXPECT_EQ(3, InsertFrame(3, 0, 3000, true, kFrameSize));
+ ExtractFrame();
+ ExtractFrame(0, true);
+ ExtractFrame();
+
+ CheckFrame(0, 1, 0);
+ CheckFrame(1, 3, 0);
+ CheckNoFrame(2);
+}
+
+TEST_F(TestFrameBuffer2, KeyframeClearsFullBuffer) {
+ const int kMaxBufferSize = 600;
+
+ for (int i = 1; i <= kMaxBufferSize; ++i)
+ EXPECT_EQ(-1, InsertFrame(i, 0, i * 1000, true, kFrameSize, i - 1));
+ ExtractFrame();
+ CheckNoFrame(0);
+
+ EXPECT_EQ(kMaxBufferSize + 1,
+ InsertFrame(kMaxBufferSize + 1, 0, (kMaxBufferSize + 1) * 1000,
+ true, kFrameSize));
+ ExtractFrame();
+ CheckFrame(1, kMaxBufferSize + 1, 0);
+}
+
+TEST_F(TestFrameBuffer2, DontUpdateOnUndecodableFrame) {
+ InsertFrame(1, 0, 0, true, kFrameSize);
+ ExtractFrame(0, true);
+ InsertFrame(3, 0, 0, true, kFrameSize, 2, 0);
+ InsertFrame(3, 0, 0, true, kFrameSize, 0);
+ InsertFrame(2, 0, 0, true, kFrameSize);
+ ExtractFrame(0, true);
+ ExtractFrame(0, true);
+}
+
+TEST_F(TestFrameBuffer2, DontDecodeOlderTimestamp) {
+ InsertFrame(2, 0, 1, true, kFrameSize);
+ InsertFrame(1, 0, 2, true,
+ kFrameSize); // Older picture id but newer timestamp.
+ ExtractFrame(0);
+ ExtractFrame(0);
+ CheckFrame(0, 1, 0);
+ CheckNoFrame(1);
+
+ InsertFrame(3, 0, 4, true, kFrameSize);
+ InsertFrame(4, 0, 3, true,
+ kFrameSize); // Newer picture id but older timestamp.
+ ExtractFrame(0);
+ ExtractFrame(0);
+ CheckFrame(2, 3, 0);
+ CheckNoFrame(3);
+}
+
+TEST_F(TestFrameBuffer2, CombineFramesToSuperframe) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ InsertFrame(pid + 1, 1, ts, true, 2 * kFrameSize, pid);
+ ExtractFrame(0);
+ ExtractFrame(0);
+ CheckFrame(0, pid, 1);
+ CheckNoFrame(1);
+ // Two frames should be combined and returned together.
+ CheckFrameSize(0, 3 * kFrameSize);
+
+ EXPECT_EQ(frames_[0]->SpatialIndex(), 1);
+ EXPECT_EQ(frames_[0]->SpatialLayerFrameSize(0), kFrameSize);
+ EXPECT_EQ(frames_[0]->SpatialLayerFrameSize(1), 2 * kFrameSize);
+}
+
+TEST_F(TestFrameBuffer2, HigherSpatialLayerNonDecodable) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, false, kFrameSize);
+ InsertFrame(pid + 1, 1, ts, true, kFrameSize, pid);
+
+ ExtractFrame(0);
+ CheckFrame(0, pid, 1);
+
+ InsertFrame(pid + 3, 1, ts + kFps20, true, kFrameSize, pid);
+ InsertFrame(pid + 4, 0, ts + kFps10, false, kFrameSize, pid);
+ InsertFrame(pid + 5, 1, ts + kFps10, true, kFrameSize, pid + 3, pid + 4);
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(1000));
+ // Frame pid+3 is decodable but too late.
+ // In the next superframe, pid+4 is decodable, but frame pid+5 is not.
+ // An incorrect implementation might skip the pid+3 frame and output the
+ // undecodable pid+5 instead.
+ ExtractFrame();
+ ExtractFrame();
+ CheckFrame(1, pid + 3, 1);
+ CheckFrame(2, pid + 4, 1);
+}
+
+TEST_F(TestFrameBuffer2, StopWhileWaitingForFrame) {
+ uint16_t pid = Rand();
+ uint32_t ts = Rand();
+
+ InsertFrame(pid, 0, ts, true, kFrameSize);
+ ExtractFrame(10);
+ buffer_->Stop();
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ EXPECT_THAT(frames_, IsEmpty());
+
+ // A new frame request should exit immediately and return no new frame.
+ ExtractFrame(0);
+ EXPECT_THAT(frames_, IsEmpty());
+}
+
+} // namespace video_coding
+} // namespace webrtc
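
Several of the tests above (ForwardJumps, PictureIdJumpBack,
DontDecodeOlderTimestamp) lean on the wrap-around comparison from
sequence_number_util.h, the same one InsertFrame uses on 32-bit RTP
timestamps. A short standalone reminder of its semantics:

    #include <cstdint>
    #include "rtc_base/numerics/sequence_number_util.h"

    void AheadOfExamples() {
      // Plain ordering when no wrap is involved.
      bool a = webrtc::AheadOf<uint32_t>(2000u, 1000u);  // true
      // Across the wrap point the numerically smaller value is newer: the
      // forward distance from 0xFFFFFFFE to 5 is 7, far less than half of
      // the 2^32 interval.
      bool b = webrtc::AheadOf<uint32_t>(5u, 0xFFFFFFFEu);  // true
      (void)a;
      (void)b;
    }
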
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc
new file mode 100644
index 0000000000..7ca59f779a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/frame_dependencies_calculator.h"
+
+#include <stdint.h>
+
+#include <iterator>
+#include <set>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/inlined_vector.h"
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+absl::InlinedVector<int64_t, 5> FrameDependenciesCalculator::FromBuffersUsage(
+ int64_t frame_id,
+ rtc::ArrayView<const CodecBufferUsage> buffers_usage) {
+ absl::InlinedVector<int64_t, 5> dependencies;
+ RTC_DCHECK_GT(buffers_usage.size(), 0);
+ for (const CodecBufferUsage& buffer_usage : buffers_usage) {
+ RTC_CHECK_GE(buffer_usage.id, 0);
+ if (buffers_.size() <= static_cast<size_t>(buffer_usage.id)) {
+ buffers_.resize(buffer_usage.id + 1);
+ }
+ }
+ std::set<int64_t> direct_dependencies;
+ std::set<int64_t> indirect_dependencies;
+
+ for (const CodecBufferUsage& buffer_usage : buffers_usage) {
+ if (!buffer_usage.referenced) {
+ continue;
+ }
+ const BufferUsage& buffer = buffers_[buffer_usage.id];
+ if (buffer.frame_id == absl::nullopt) {
+ RTC_LOG(LS_ERROR) << "Odd configuration: frame " << frame_id
+ << " references buffer #" << buffer_usage.id
+ << " that was never updated.";
+ continue;
+ }
+ direct_dependencies.insert(*buffer.frame_id);
+ indirect_dependencies.insert(buffer.dependencies.begin(),
+ buffer.dependencies.end());
+ }
+ // Reduce references: if frame #3 depends on frame #2 and #1, and frame #2
+ // depends on frame #1, then frame #3 needs to depend just on frame #2.
+ // Though this set difference removes only one level of indirection, it
+ // seems enough for all currently used structures.
+ absl::c_set_difference(direct_dependencies, indirect_dependencies,
+ std::back_inserter(dependencies));
+
+ // Update buffers.
+ for (const CodecBufferUsage& buffer_usage : buffers_usage) {
+ if (!buffer_usage.updated) {
+ continue;
+ }
+ BufferUsage& buffer = buffers_[buffer_usage.id];
+ buffer.frame_id = frame_id;
+ buffer.dependencies.assign(direct_dependencies.begin(),
+ direct_dependencies.end());
+ }
+
+ return dependencies;
+}
+
+} // namespace webrtc
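
The set-difference step is the core of the calculator, so a standalone sketch
may help; it mirrors the example in the comment above (frame #3 references
buffers last written by frames #1 and #2, and frame #2 already recorded #1 as
a dependency):

    #include <cstdint>
    #include <iterator>
    #include <set>
    #include <vector>
    #include "absl/algorithm/container.h"

    std::vector<int64_t> ReducedDependenciesExample() {
      std::set<int64_t> direct = {1, 2};  // Updaters of referenced buffers.
      std::set<int64_t> indirect = {1};   // Recorded on frame #2's buffer.
      std::vector<int64_t> reduced;
      absl::c_set_difference(direct, indirect, std::back_inserter(reduced));
      return reduced;  // {2}: the dependency on #1 is implied through #2.
    }
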
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h
new file mode 100644
index 0000000000..2c4a8502e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_DEPENDENCIES_CALCULATOR_H_
+#define MODULES_VIDEO_CODING_FRAME_DEPENDENCIES_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+
+namespace webrtc {
+
+// This class is thread compatible.
+class FrameDependenciesCalculator {
+ public:
+ FrameDependenciesCalculator() = default;
+ FrameDependenciesCalculator(const FrameDependenciesCalculator&) = default;
+ FrameDependenciesCalculator& operator=(const FrameDependenciesCalculator&) =
+ default;
+
+ // Calculates frame dependencies based on previous encoder buffer usage.
+ absl::InlinedVector<int64_t, 5> FromBuffersUsage(
+ int64_t frame_id,
+ rtc::ArrayView<const CodecBufferUsage> buffers_usage);
+
+ private:
+ struct BufferUsage {
+ absl::optional<int64_t> frame_id;
+ absl::InlinedVector<int64_t, 4> dependencies;
+ };
+
+ absl::InlinedVector<BufferUsage, 4> buffers_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_DEPENDENCIES_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build
new file mode 100644
index 0000000000..c518044c94
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_dependencies_calculator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc
new file mode 100644
index 0000000000..a09650401a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_dependencies_calculator_unittest.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_dependencies_calculator.h"
+
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+constexpr CodecBufferUsage ReferenceAndUpdate(int id) {
+ return CodecBufferUsage(id, /*referenced=*/true, /*updated=*/true);
+}
+constexpr CodecBufferUsage Reference(int id) {
+ return CodecBufferUsage(id, /*referenced=*/true, /*updated=*/false);
+}
+constexpr CodecBufferUsage Update(int id) {
+ return CodecBufferUsage(id, /*referenced=*/false, /*updated=*/true);
+}
+
+TEST(FrameDependenciesCalculatorTest, SingleLayer) {
+ CodecBufferUsage pattern[] = {ReferenceAndUpdate(0)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern),
+ ElementsAre(3));
+}
+
+TEST(FrameDependenciesCalculatorTest, TwoTemporalLayers) {
+ // Shortened 4-frame pattern:
+ // T1: 2---4 6---8 ...
+ // / / / /
+ // T0: 1---3---5---7 ...
+ CodecBufferUsage pattern0[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern1[] = {Reference(0), Update(1)};
+ CodecBufferUsage pattern2[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern3[] = {Reference(0), Reference(1)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern2),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern3),
+ UnorderedElementsAre(2, 3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0),
+ ElementsAre(3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1),
+ ElementsAre(5));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/7, pattern2),
+ ElementsAre(5));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/8, pattern3),
+ UnorderedElementsAre(6, 7));
+}
+
+TEST(FrameDependenciesCalculatorTest, ThreeTemporalLayers4FramePattern) {
+ // T2: 2---4 6---8 ...
+ // / / / /
+ // T1: | 3 | 7 ...
+ // /_/ /_/
+ // T0: 1-------5----- ...
+ CodecBufferUsage pattern0[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern1[] = {Reference(0), Update(2)};
+ CodecBufferUsage pattern2[] = {Reference(0), Update(1)};
+ CodecBufferUsage pattern3[] = {Reference(0), Reference(1), Reference(2)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern2),
+ ElementsAre(1));
+ // Note that frame #4 references buffer #0, which is updated by frame #1,
+ // yet there is no direct dependency from frame #4 to frame #1.
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern3),
+ UnorderedElementsAre(2, 3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1),
+ ElementsAre(5));
+}
+
+TEST(FrameDependenciesCalculatorTest, SimulcastWith2Layers) {
+ // S1: 2---4---6- ...
+ //
+ // S0: 1---3---5- ...
+ CodecBufferUsage pattern0[] = {ReferenceAndUpdate(0)};
+ CodecBufferUsage pattern1[] = {ReferenceAndUpdate(1)};
+ FrameDependenciesCalculator calculator;
+
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1), IsEmpty());
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern0),
+ ElementsAre(1));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern1),
+ ElementsAre(2));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0),
+ ElementsAre(3));
+ EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1),
+ ElementsAre(4));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers.cc b/third_party/libwebrtc/modules/video_coding/frame_helpers.cc
new file mode 100644
index 0000000000..e25eac8a18
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_helpers.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr TimeDelta kMaxVideoDelay = TimeDelta::Millis(10000);
+}
+
+bool FrameHasBadRenderTiming(Timestamp render_time, Timestamp now) {
+ // Zero render time means render immediately.
+ if (render_time.IsZero()) {
+ return false;
+ }
+ if (render_time < Timestamp::Zero()) {
+ return true;
+ }
+ TimeDelta frame_delay = render_time - now;
+ if (frame_delay.Abs() > kMaxVideoDelay) {
+ RTC_LOG(LS_WARNING) << "Frame has bad render timing because it is out of "
+ "the delay bounds (frame_delay_ms="
+ << frame_delay.ms()
+ << ", kMaxVideoDelay_ms=" << kMaxVideoDelay.ms() << ")";
+ return true;
+ }
+ return false;
+}
+
+bool TargetVideoDelayIsTooLarge(TimeDelta target_video_delay) {
+ if (target_video_delay > kMaxVideoDelay) {
+ RTC_LOG(LS_WARNING)
+ << "Target video delay is too large. (target_video_delay_ms="
+ << target_video_delay.ms()
+ << ", kMaxVideoDelay_ms=" << kMaxVideoDelay.ms() << ")";
+ return true;
+ }
+ return false;
+}
+
+std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames) {
+ RTC_DCHECK(!frames.empty());
+
+ if (frames.size() == 1) {
+ return std::move(frames[0]);
+ }
+
+ size_t total_length = 0;
+ for (const auto& frame : frames) {
+ total_length += frame->size();
+ }
+ const EncodedFrame& last_frame = *frames.back();
+ std::unique_ptr<EncodedFrame> first_frame = std::move(frames[0]);
+ auto encoded_image_buffer = EncodedImageBuffer::Create(total_length);
+ uint8_t* buffer = encoded_image_buffer->data();
+ first_frame->SetSpatialLayerFrameSize(first_frame->SpatialIndex().value_or(0),
+ first_frame->size());
+ memcpy(buffer, first_frame->data(), first_frame->size());
+ buffer += first_frame->size();
+
+ // The spatial index of the combined frame is set to the spatial index of
+ // its top spatial layer.
+ first_frame->SetSpatialIndex(last_frame.SpatialIndex().value_or(0));
+
+ first_frame->video_timing_mutable()->network2_timestamp_ms =
+ last_frame.video_timing().network2_timestamp_ms;
+ first_frame->video_timing_mutable()->receive_finish_ms =
+ last_frame.video_timing().receive_finish_ms;
+
+ // Append all remaining frames to the first one.
+ for (size_t i = 1; i < frames.size(); ++i) {
+ // Let `next_frame` fall out of scope so it is deleted after copying.
+ std::unique_ptr<EncodedFrame> next_frame = std::move(frames[i]);
+ first_frame->SetSpatialLayerFrameSize(
+ next_frame->SpatialIndex().value_or(0), next_frame->size());
+ memcpy(buffer, next_frame->data(), next_frame->size());
+ buffer += next_frame->size();
+ }
+ first_frame->SetEncodedData(encoded_image_buffer);
+ return first_frame;
+}
+
+} // namespace webrtc
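
The observable contract of the concatenation above, as a short sketch
(`MakeLayerFrame` is an assumed helper that builds an EncodedFrame with the
given spatial index and payload size; the CombineFramesToSuperframe test in
frame_buffer2_unittest.cc exercises the same behavior):

    absl::InlinedVector<std::unique_ptr<webrtc::EncodedFrame>, 4> frames;
    frames.push_back(MakeLayerFrame(/*spatial_index=*/0, /*size=*/10));
    frames.push_back(MakeLayerFrame(/*spatial_index=*/1, /*size=*/20));
    auto combined = webrtc::CombineAndDeleteFrames(std::move(frames));
    // combined->size() == 30, with the payloads laid out back to back.
    // combined->SpatialIndex() == 1, the top spatial layer.
    // combined->SpatialLayerFrameSize(0) == 10 and (1) == 20.
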
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers.h b/third_party/libwebrtc/modules/video_coding/frame_helpers.h
new file mode 100644
index 0000000000..56ee593678
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_HELPERS_H_
+#define MODULES_VIDEO_CODING_FRAME_HELPERS_H_
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+
+bool FrameHasBadRenderTiming(Timestamp render_time, Timestamp now);
+
+bool TargetVideoDelayIsTooLarge(TimeDelta target_video_delay);
+
+std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_HELPERS_H_
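
A quick sanity check of the helper semantics above, with values chosen against
the 10-second kMaxVideoDelay bound defined in frame_helpers.cc (sketch only):

    #include "api/units/time_delta.h"
    #include "api/units/timestamp.h"
    #include "modules/video_coding/frame_helpers.h"

    void TimingHelperExamples() {
      const webrtc::Timestamp now = webrtc::Timestamp::Seconds(100);
      // Zero render time means "render immediately" and is never bad.
      bool a = webrtc::FrameHasBadRenderTiming(webrtc::Timestamp::Zero(), now);
      // 111 s is 11 s ahead of `now`, outside the +/- 10 s delay bound.
      bool b = webrtc::FrameHasBadRenderTiming(
          webrtc::Timestamp::Seconds(111), now);
      bool c =
          webrtc::TargetVideoDelayIsTooLarge(webrtc::TimeDelta::Seconds(11));
      // a == false, b == true, c == true.
      (void)a; (void)b; (void)c;
    }
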
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build
new file mode 100644
index 0000000000..d6342c48f7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/frame_helpers.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_helpers_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/frame_helpers_unittest.cc b/third_party/libwebrtc/modules/video_coding/frame_helpers_unittest.cc
new file mode 100644
index 0000000000..1f73689c0a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_helpers_unittest.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_helpers.h"
+
+#include "api/units/timestamp.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+TEST(FrameHasBadRenderTimingTest, LargePositiveFrameDelayIsBad) {
+ Timestamp render_time = Timestamp::Seconds(12);
+ Timestamp now = Timestamp::Seconds(0);
+
+ EXPECT_TRUE(FrameHasBadRenderTiming(render_time, now));
+}
+
+TEST(FrameHasBadRenderTimingTest, LargeNegativeFrameDelayIsBad) {
+ Timestamp render_time = Timestamp::Seconds(12);
+ Timestamp now = Timestamp::Seconds(24);
+
+ EXPECT_TRUE(FrameHasBadRenderTiming(render_time, now));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_object.cc b/third_party/libwebrtc/modules/video_coding/frame_object.cc
new file mode 100644
index 0000000000..d226dcd013
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_object.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/frame_object.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_timing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+RtpFrameObject::RtpFrameObject(
+ uint16_t first_seq_num,
+ uint16_t last_seq_num,
+ bool markerBit,
+ int times_nacked,
+ int64_t first_packet_received_time,
+ int64_t last_packet_received_time,
+ uint32_t rtp_timestamp,
+ int64_t ntp_time_ms,
+ const VideoSendTiming& timing,
+ uint8_t payload_type,
+ VideoCodecType codec,
+ VideoRotation rotation,
+ VideoContentType content_type,
+ const RTPVideoHeader& video_header,
+ const absl::optional<webrtc::ColorSpace>& color_space,
+ RtpPacketInfos packet_infos,
+ rtc::scoped_refptr<EncodedImageBuffer> image_buffer)
+ : image_buffer_(image_buffer),
+ first_seq_num_(first_seq_num),
+ last_seq_num_(last_seq_num),
+ last_packet_received_time_(last_packet_received_time),
+ times_nacked_(times_nacked) {
+ rtp_video_header_ = video_header;
+
+ // EncodedFrame members
+ codec_type_ = codec;
+
+ // TODO(philipel): Remove when encoded image is replaced by EncodedFrame.
+ // VCMEncodedFrame members
+ CopyCodecSpecific(&rtp_video_header_);
+ _payloadType = payload_type;
+ SetTimestamp(rtp_timestamp);
+ ntp_time_ms_ = ntp_time_ms;
+ _frameType = rtp_video_header_.frame_type;
+
+  // Set the frame's playout delays to the same values as the first packet's.
+ SetPlayoutDelay(rtp_video_header_.playout_delay);
+
+ SetEncodedData(image_buffer_);
+ _encodedWidth = rtp_video_header_.width;
+ _encodedHeight = rtp_video_header_.height;
+
+ // EncodedFrame members
+ SetPacketInfos(std::move(packet_infos));
+
+ rotation_ = rotation;
+ SetColorSpace(color_space);
+ SetVideoFrameTrackingId(rtp_video_header_.video_frame_tracking_id);
+ content_type_ = content_type;
+ if (timing.flags != VideoSendTiming::kInvalid) {
+ // ntp_time_ms_ may be -1 if not estimated yet. This is not a problem,
+ // as this will be dealt with at the time of reporting.
+ timing_.encode_start_ms = ntp_time_ms_ + timing.encode_start_delta_ms;
+ timing_.encode_finish_ms = ntp_time_ms_ + timing.encode_finish_delta_ms;
+ timing_.packetization_finish_ms =
+ ntp_time_ms_ + timing.packetization_finish_delta_ms;
+ timing_.pacer_exit_ms = ntp_time_ms_ + timing.pacer_exit_delta_ms;
+ timing_.network_timestamp_ms =
+ ntp_time_ms_ + timing.network_timestamp_delta_ms;
+ timing_.network2_timestamp_ms =
+ ntp_time_ms_ + timing.network2_timestamp_delta_ms;
+ }
+ timing_.receive_start_ms = first_packet_received_time;
+ timing_.receive_finish_ms = last_packet_received_time;
+ timing_.flags = timing.flags;
+ is_last_spatial_layer = markerBit;
+}
+
+RtpFrameObject::~RtpFrameObject() {
+}
+
+uint16_t RtpFrameObject::first_seq_num() const {
+ return first_seq_num_;
+}
+
+uint16_t RtpFrameObject::last_seq_num() const {
+ return last_seq_num_;
+}
+
+int RtpFrameObject::times_nacked() const {
+ return times_nacked_;
+}
+
+VideoFrameType RtpFrameObject::frame_type() const {
+ return rtp_video_header_.frame_type;
+}
+
+VideoCodecType RtpFrameObject::codec_type() const {
+ return codec_type_;
+}
+
+int64_t RtpFrameObject::ReceivedTime() const {
+ return last_packet_received_time_;
+}
+
+int64_t RtpFrameObject::RenderTime() const {
+ return _renderTimeMs;
+}
+
+bool RtpFrameObject::delayed_by_retransmission() const {
+ return times_nacked() > 0;
+}
+
+const RTPVideoHeader& RtpFrameObject::GetRtpVideoHeader() const {
+ return rtp_video_header_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/frame_object.h b/third_party/libwebrtc/modules/video_coding/frame_object.h
new file mode 100644
index 0000000000..c6f069f241
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/frame_object.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_FRAME_OBJECT_H_
+#define MODULES_VIDEO_CODING_FRAME_OBJECT_H_
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+
+class RtpFrameObject : public EncodedFrame {
+ public:
+ RtpFrameObject(uint16_t first_seq_num,
+ uint16_t last_seq_num,
+ bool markerBit,
+ int times_nacked,
+ int64_t first_packet_received_time,
+ int64_t last_packet_received_time,
+ uint32_t rtp_timestamp,
+ int64_t ntp_time_ms,
+ const VideoSendTiming& timing,
+ uint8_t payload_type,
+ VideoCodecType codec,
+ VideoRotation rotation,
+ VideoContentType content_type,
+ const RTPVideoHeader& video_header,
+ const absl::optional<webrtc::ColorSpace>& color_space,
+ RtpPacketInfos packet_infos,
+ rtc::scoped_refptr<EncodedImageBuffer> image_buffer);
+
+ ~RtpFrameObject() override;
+ uint16_t first_seq_num() const;
+ uint16_t last_seq_num() const;
+ int times_nacked() const;
+ VideoFrameType frame_type() const;
+ VideoCodecType codec_type() const;
+ int64_t ReceivedTime() const override;
+ int64_t RenderTime() const override;
+ bool delayed_by_retransmission() const override;
+ const RTPVideoHeader& GetRtpVideoHeader() const;
+
+ uint8_t* mutable_data() { return image_buffer_->data(); }
+
+ private:
+ // Reference for mutable access.
+ rtc::scoped_refptr<EncodedImageBuffer> image_buffer_;
+ RTPVideoHeader rtp_video_header_;
+ VideoCodecType codec_type_;
+ uint16_t first_seq_num_;
+ uint16_t last_seq_num_;
+ int64_t last_packet_received_time_;
+
+  // Equal to the times nacked of the packet with the highest times nacked
+  // belonging to this frame.
+ int times_nacked_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_FRAME_OBJECT_H_
diff --git a/third_party/libwebrtc/modules/video_coding/g3doc/index.md b/third_party/libwebrtc/modules/video_coding/g3doc/index.md
new file mode 100644
index 0000000000..fdf39982fa
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/g3doc/index.md
@@ -0,0 +1,177 @@
+<!-- go/cmark -->
+<!--* freshness: {owner: 'brandtr' reviewed: '2021-04-15'} *-->
+
+# Video coding in WebRTC
+
+## Introduction to layered video coding
+
+[Video coding][video-coding-wiki] is the process of encoding a stream of
+uncompressed video frames into a compressed bitstream, whose bitrate is lower
+than that of the original stream.
+
+### Block-based hybrid video coding
+
+All video codecs in WebRTC are based on the block-based hybrid video coding
+paradigm, which entails prediction of the original video frame using either
+[information from previously encoded frames][motion-compensation-wiki] or
+information from previously encoded portions of the current frame, subtraction
+of the prediction from the original video, and
+[transform][transform-coding-wiki] and [quantization][quantization-wiki] of the
+resulting difference. The output of the quantization process, quantized
+transform coefficients, is losslessly [entropy coded][entropy-coding-wiki] along
+with other encoder parameters (e.g., those related to the prediction process)
+and then a reconstruction is constructed by inverse quantizing and inverse
+transforming the quantized transform coefficients and adding the result to the
+prediction. Finally, in-loop filtering is applied and the resulting
+reconstruction is stored as a reference frame to be used to develop predictions
+for future frames.
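+
+To make the flow concrete, the following toy program runs one iteration of
+the hybrid coding loop on a single sample value, using an identity transform
+and a scalar quantizer. It is a conceptual sketch only, not an excerpt of any
+WebRTC encoder:
+
+```cpp
+#include <cstdio>
+
+int main() {
+  const int original = 127;    // sample of the current frame
+  const int prediction = 120;  // predicted from a reconstructed reference
+  const int step = 4;          // quantization step size
+
+  int residual = original - prediction;
+  int quantized = residual / step;  // this is what gets entropy coded
+  int reconstruction = prediction + quantized * step;
+
+  // The decoder reproduces `reconstruction`, not `original`; the difference
+  // is the quantization error introduced by the lossy step.
+  std::printf("reconstruction=%d error=%d\n", reconstruction,
+              original - reconstruction);
+  return 0;
+}
+```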
+
+### Frame types
+
+When an encoded frame depends on previously encoded frames (i.e., it has one or
+more inter-frame dependencies), the prior frames must be available at the
+receiver before the current frame can be decoded. In order for a receiver to
+start decoding an encoded bitstream, a frame which has no prior dependencies is
+required. Such a frame is called a "key frame". For real-time-communications
+encoding, key frames typically compress less efficiently than "delta frames"
+(i.e., frames whose predictions are derived from previously encoded frames).
+
+### Single-layer coding
+
+In 1:1 calls, the encoded bitstream has a single recipient. Using end-to-end
+bandwidth estimation, the target bitrate can thus be well tailored for the
+intended recipient. The number of key frames can be kept to a minimum and the
+compressibility of the stream can be maximized. One way of achieving this is by
+using "single-layer coding", where each delta frame only depends on the frame
+that was most recently encoded.
+
+### Scalable video coding
+
+In multiway conferences, on the other hand, the encoded bitstream has multiple
+recipients, each of whom may have different downlink bandwidths. In order to
+tailor the encoded bitstreams to a heterogeneous network of receivers,
+[scalable video coding][svc-wiki] can be used. The idea is to introduce
+structure into the dependency graph of the encoded bitstream, such that _layers_ of
+the full stream can be decoded using only available lower layers. This structure
+allows for a [selective forwarding unit][sfu-webrtc-glossary] to discard upper
+layers of the bitstream in order to achieve the intended downlink
+bandwidth.
+
+There are multiple types of scalability:
+
+* _Temporal scalability_: layers whose framerate (and bitrate) is lower than that of the upper layer(s)
+* _Spatial scalability_: layers whose resolution (and bitrate) is lower than that of the upper layer(s)
+* _Quality scalability_: layers whose bitrate is lower than that of the upper layer(s)
+
+WebRTC supports temporal scalability for `VP8`, `VP9` and `AV1`, and spatial
+scalability for `VP9` and `AV1`.
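+
+As an illustration, the sketch below assigns temporal layer ids for a
+two-layer (T0/T1) stream using the common alternating pattern. This is a
+simplified model of such a structure, not WebRTC's implementation; in this
+pattern T0 frames only reference other T0 frames, so a forwarding unit that
+drops every T1 frame halves the framerate without breaking decoding:
+
+```cpp
+#include <cstdio>
+
+int main() {
+  for (int frame = 0; frame < 8; ++frame) {
+    int temporal_id = frame % 2;  // even frames are T0, odd frames are T1
+    std::printf("frame %d -> T%d\n", frame, temporal_id);
+  }
+  return 0;
+}
+```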
+
+### Simulcast
+
+Simulcast is another approach for multiway conferencing, where multiple
+_independent_ bitstreams are produced by the encoder.
+
+In cases where multiple encodings of the same source are required (e.g., uplink
+transmission in a multiway call), spatial scalability with inter-layer
+prediction generally offers superior coding efficiency compared with simulcast.
+When a single encoding is required (e.g., downlink transmission in any call),
+simulcast generally provides better coding efficiency for the upper spatial
+layers. The `K-SVC` concept, where spatial inter-layer dependencies are only
+used to encode key frames, for which inter-layer prediction is typically
+significantly more effective than it is for delta frames, can be seen as a
+compromise between full spatial scalability and simulcast.
+
+## Overview of implementation in `modules/video_coding`
+
+Given the general introduction to video coding above, we now describe some
+specifics of the [`modules/video_coding`][modules-video-coding] folder in WebRTC.
+
+### Built-in software codecs in [`modules/video_coding/codecs`][modules-video-coding-codecs]
+
+This folder contains WebRTC-specific classes that wrap software codec
+implementations for different video coding standards:
+
+* [libaom][libaom-src] for [AV1][av1-spec]
+* [libvpx][libvpx-src] for [VP8][vp8-spec] and [VP9][vp9-spec]
+* [OpenH264][openh264-src] for [H.264 constrained baseline profile][h264-spec]
+
+Users of the library can also inject their own codecs, using the
+[VideoEncoderFactory][video-encoder-factory-interface] and
+[VideoDecoderFactory][video-decoder-factory-interface] interfaces. This is how
+platform-supported codecs, such as hardware-backed codecs, are implemented.
+
+### Video codec test framework in [`modules/video_coding/codecs/test`][modules-video-coding-codecs-test]
+
+This folder contains a test framework that can be used to evaluate video quality
+performance of different video codec implementations.
+
+### SVC helper classes in [`modules/video_coding/svc`][modules-video-coding-svc]
+
+* [`ScalabilityStructure*`][scalabilitystructure] - different
+ [standardized scalability structures][scalability-structure-spec]
+* [`ScalableVideoController`][scalablevideocontroller] - provides instructions to the video encoder on how
+  to create a scalable stream (see the sketch after this list)
+* [`SvcRateAllocator`][svcrateallocator] - bitrate allocation to different spatial and temporal
+ layers
+
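+A hypothetical usage sketch of these helpers follows. The type and method
+names are taken from the headers in `modules/video_coding/svc/` as of this
+import; treat the details as assumptions and check those headers before
+relying on them:
+
+```cpp
+#include <memory>
+
+#include "api/video_codecs/scalability_mode.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+void SketchEncodeStep() {
+  // Two spatial layers, two temporal layers, with inter-layer prediction.
+  std::unique_ptr<webrtc::ScalableVideoController> structure =
+      webrtc::CreateScalabilityStructure(webrtc::ScalabilityMode::kL2T2);
+  if (!structure)
+    return;
+
+  // Ask the controller which layer frames to encode next and which buffers
+  // each of them should reference and update.
+  auto configs = structure->NextFrameConfig(/*restart=*/false);
+  for (const auto& config : configs) {
+    (void)config;  // Placeholder for the per-layer encode call; results are
+                   // reported back to the controller afterwards.
+  }
+}
+```
+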
+### Utility classes in [`modules/video_coding/utility`][modules-video-coding-utility]
+
+* [`FrameDropper`][framedropper] - drops incoming frames when the encoder
+  systematically overshoots its target bitrate
+* [`FramerateController`][frameratecontroller] - drops incoming frames to achieve a target framerate
+* [`QpParser`][qpparser] - parses the quantization parameter from a bitstream
+* [`QualityScaler`][qualityscaler] - signals when an encoder generates encoded frames whose
+ quantization parameter is outside the window of acceptable values
+* [`SimulcastRateAllocator`][simulcastrateallocator] - bitrate allocation to simulcast layers
+
+### General helper classes in [`modules/video_coding`][modules-video-coding]
+
+* [`FecControllerDefault`][feccontrollerdefault] - provides a default implementation for rate
+ allocation to [forward error correction][fec-wiki]
+* [`VideoCodecInitializer`][videocodecinitializer] - converts between different encoder configuration
+ structs
+
+### Receiver buffer classes in [`modules/video_coding`][modules-video-coding]
+
+* [`PacketBuffer`][packetbuffer] - (re-)combines RTP packets into frames
+* [`RtpFrameReferenceFinder`][rtpframereferencefinder] - determines dependencies between frames based on information in the RTP header, payload header and RTP extensions
+* [`FrameBuffer`][framebuffer] - orders frames based on their dependencies before they are fed to the decoder
+
+[video-coding-wiki]: https://en.wikipedia.org/wiki/Video_coding_format
+[motion-compensation-wiki]: https://en.wikipedia.org/wiki/Motion_compensation
+[transform-coding-wiki]: https://en.wikipedia.org/wiki/Transform_coding
+[motion-vector-wiki]: https://en.wikipedia.org/wiki/Motion_vector
+[mpeg-wiki]: https://en.wikipedia.org/wiki/Moving_Picture_Experts_Group
+[svc-wiki]: https://en.wikipedia.org/wiki/Scalable_Video_Coding
+[sfu-webrtc-glossary]: https://webrtcglossary.com/sfu/
+[libvpx-src]: https://chromium.googlesource.com/webm/libvpx/
+[libaom-src]: https://aomedia.googlesource.com/aom/
+[openh264-src]: https://github.com/cisco/openh264
+[vp8-spec]: https://tools.ietf.org/html/rfc6386
+[vp9-spec]: https://storage.googleapis.com/downloads.webmproject.org/docs/vp9/vp9-bitstream-specification-v0.6-20160331-draft.pdf
+[av1-spec]: https://aomediacodec.github.io/av1-spec/
+[h264-spec]: https://www.itu.int/rec/T-REC-H.264-201906-I/en
+[video-encoder-factory-interface]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video_codecs/video_encoder_factory.h;l=27;drc=afadfb24a5e608da6ae102b20b0add53a083dcf3
+[video-decoder-factory-interface]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video_codecs/video_decoder_factory.h;l=27;drc=49c293f03d8f593aa3aca282577fcb14daa63207
+[scalability-structure-spec]: https://w3c.github.io/webrtc-svc/#scalabilitymodes*
+[fec-wiki]: https://en.wikipedia.org/wiki/Error_correction_code#Forward_error_correction
+[entropy-coding-wiki]: https://en.wikipedia.org/wiki/Entropy_encoding
+[modules-video-coding]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/
+[modules-video-coding-codecs]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/codecs/
+[modules-video-coding-codecs-test]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/codecs/test/
+[modules-video-coding-svc]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/
+[modules-video-coding-utility]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/
+[scalabilitystructure]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/create_scalability_structure.h?q=CreateScalabilityStructure
+[scalablevideocontroller]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/scalable_video_controller.h?q=ScalableVideoController
+[svcrateallocator]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/svc/svc_rate_allocator.h?q=SvcRateAllocator
+[framedropper]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/frame_dropper.h?q=FrameDropper
+[frameratecontroller]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/framerate_controller.h?q=FramerateController
+[qpparser]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/qp_parser.h?q=QpParser
+[qualityscaler]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/quality_scaler.h?q=QualityScaler
+[simulcastrateallocator]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/simulcast_rate_allocator.h?q=SimulcastRateAllocator
+[feccontrollerdefault]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/fec_controller_default.h?q=FecControllerDefault
+[videocodecinitializer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/include/video_codec_initializer.h?q=VideoCodecInitializer
+[packetbuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/packet_buffer.h?q=PacketBuffer
+[rtpframereferencefinder]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/rtp_frame_reference_finder.h?q=RtpFrameReferenceFinder
+[framebuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/frame_buffer2.h?q=FrameBuffer
+[quantization-wiki]: https://en.wikipedia.org/wiki/Quantization_(signal_processing)
diff --git a/third_party/libwebrtc/modules/video_coding/generic_decoder.cc b/third_party/libwebrtc/modules/video_coding/generic_decoder.cc
new file mode 100644
index 0000000000..54467d1477
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/generic_decoder.cc
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/generic_decoder.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace {
+
+// Changed from 10 to 30 in Mozilla Bug 989944: Increase decode
+// timestamp map to handle delayed decode on 8x10. The map is
+// now a deque (as of libwebrtc upstream commit 1c51ec4d74).
+constexpr size_t kDecoderFrameMemoryLength = 30;
+
+}  // namespace
+
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(
+ VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials)
+ : _clock(clock), _timing(timing) {
+ ntp_offset_ =
+ _clock->CurrentNtpInMilliseconds() - _clock->TimeInMilliseconds();
+}
+
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {}
+
+void VCMDecodedFrameCallback::SetUserReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ RTC_DCHECK(construction_thread_.IsCurrent());
+ RTC_DCHECK((!_receiveCallback && receiveCallback) ||
+ (_receiveCallback && !receiveCallback));
+ _receiveCallback = receiveCallback;
+}
+
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback() {
+ // Called on the decode thread via VCMCodecDataBase::GetDecoder.
+ // The callback must always have been set before this happens.
+ RTC_DCHECK(_receiveCallback);
+ return _receiveCallback;
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
+  // This function may be called on the decode TaskQueue, but may also be
+  // called on an OS-provided queue, such as on iOS (see e.g. b/153465112).
+ return Decoded(decodedImage, -1);
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
+ int64_t decode_time_ms) {
+ Decoded(decodedImage,
+ decode_time_ms >= 0 ? absl::optional<int32_t>(decode_time_ms)
+ : absl::nullopt,
+ absl::nullopt);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+std::pair<absl::optional<FrameInfo>, size_t>
+VCMDecodedFrameCallback::FindFrameInfo(uint32_t rtp_timestamp) {
+ absl::optional<FrameInfo> frame_info;
+
+ auto it = absl::c_find_if(frame_infos_, [rtp_timestamp](const auto& entry) {
+ return entry.rtp_timestamp == rtp_timestamp ||
+ IsNewerTimestamp(entry.rtp_timestamp, rtp_timestamp);
+ });
+ size_t dropped_frames = std::distance(frame_infos_.begin(), it);
+
+ if (it != frame_infos_.end() && it->rtp_timestamp == rtp_timestamp) {
+ // Frame was found and should also be removed from the queue.
+ frame_info = std::move(*it);
+ ++it;
+ }
+
+ frame_infos_.erase(frame_infos_.begin(), it);
+ return std::make_pair(std::move(frame_info), dropped_frames);
+}
+
+void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ RTC_DCHECK(_receiveCallback) << "Callback must not be null at this point";
+ TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
+ "timestamp", decodedImage.timestamp());
+ // TODO(holmer): We should improve this so that we can handle multiple
+ // callbacks from one call to Decode().
+ absl::optional<FrameInfo> frame_info;
+ int timestamp_map_size = 0;
+ int dropped_frames = 0;
+ {
+ MutexLock lock(&lock_);
+ std::tie(frame_info, dropped_frames) =
+ FindFrameInfo(decodedImage.timestamp());
+ timestamp_map_size = frame_infos_.size();
+ }
+ if (dropped_frames > 0) {
+ _receiveCallback->OnDroppedFrames(dropped_frames);
+ }
+
+ if (!frame_info) {
+ RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+ "frame with timestamp "
+ << decodedImage.timestamp();
+ return;
+ }
+
+ decodedImage.set_ntp_time_ms(frame_info->ntp_time_ms);
+ decodedImage.set_packet_infos(frame_info->packet_infos);
+ decodedImage.set_rotation(frame_info->rotation);
+ VideoFrame::RenderParameters render_parameters = _timing->RenderParameters();
+ if (render_parameters.max_composition_delay_in_frames) {
+ // Subtract frames that are in flight.
+ render_parameters.max_composition_delay_in_frames =
+ std::max(0, *render_parameters.max_composition_delay_in_frames -
+ timestamp_map_size);
+ }
+ decodedImage.set_render_parameters(render_parameters);
+
+ RTC_DCHECK(frame_info->decode_start);
+ const Timestamp now = _clock->CurrentTime();
+ const TimeDelta decode_time = decode_time_ms
+ ? TimeDelta::Millis(*decode_time_ms)
+ : now - *frame_info->decode_start;
+ _timing->StopDecodeTimer(decode_time, now);
+ decodedImage.set_processing_time(
+ {*frame_info->decode_start, *frame_info->decode_start + decode_time});
+
+ // Report timing information.
+ TimingFrameInfo timing_frame_info;
+ if (frame_info->timing.flags != VideoSendTiming::kInvalid) {
+ int64_t capture_time_ms = decodedImage.ntp_time_ms() - ntp_offset_;
+ // Convert remote timestamps to local time from ntp timestamps.
+ frame_info->timing.encode_start_ms -= ntp_offset_;
+ frame_info->timing.encode_finish_ms -= ntp_offset_;
+ frame_info->timing.packetization_finish_ms -= ntp_offset_;
+ frame_info->timing.pacer_exit_ms -= ntp_offset_;
+ frame_info->timing.network_timestamp_ms -= ntp_offset_;
+ frame_info->timing.network2_timestamp_ms -= ntp_offset_;
+
+ int64_t sender_delta_ms = 0;
+ if (decodedImage.ntp_time_ms() < 0) {
+ // Sender clock is not estimated yet. Make sure that sender times are all
+ // negative to indicate that. Yet they still should be relatively correct.
+ sender_delta_ms =
+ std::max({capture_time_ms, frame_info->timing.encode_start_ms,
+ frame_info->timing.encode_finish_ms,
+ frame_info->timing.packetization_finish_ms,
+ frame_info->timing.pacer_exit_ms,
+ frame_info->timing.network_timestamp_ms,
+ frame_info->timing.network2_timestamp_ms}) +
+ 1;
+ }
+
+ timing_frame_info.capture_time_ms = capture_time_ms - sender_delta_ms;
+ timing_frame_info.encode_start_ms =
+ frame_info->timing.encode_start_ms - sender_delta_ms;
+ timing_frame_info.encode_finish_ms =
+ frame_info->timing.encode_finish_ms - sender_delta_ms;
+ timing_frame_info.packetization_finish_ms =
+ frame_info->timing.packetization_finish_ms - sender_delta_ms;
+ timing_frame_info.pacer_exit_ms =
+ frame_info->timing.pacer_exit_ms - sender_delta_ms;
+ timing_frame_info.network_timestamp_ms =
+ frame_info->timing.network_timestamp_ms - sender_delta_ms;
+ timing_frame_info.network2_timestamp_ms =
+ frame_info->timing.network2_timestamp_ms - sender_delta_ms;
+ }
+
+ timing_frame_info.flags = frame_info->timing.flags;
+ timing_frame_info.decode_start_ms = frame_info->decode_start->ms();
+ timing_frame_info.decode_finish_ms = now.ms();
+ timing_frame_info.render_time_ms =
+ frame_info->render_time ? frame_info->render_time->ms() : -1;
+ timing_frame_info.rtp_timestamp = decodedImage.timestamp();
+ timing_frame_info.receive_start_ms = frame_info->timing.receive_start_ms;
+ timing_frame_info.receive_finish_ms = frame_info->timing.receive_finish_ms;
+ _timing->SetTimingFrameInfo(timing_frame_info);
+
+ decodedImage.set_timestamp_us(
+ frame_info->render_time ? frame_info->render_time->us() : -1);
+ _receiveCallback->FrameToRender(decodedImage, qp, decode_time,
+ frame_info->content_type);
+}
+
+void VCMDecodedFrameCallback::OnDecoderInfoChanged(
+ const VideoDecoder::DecoderInfo& decoder_info) {
+ _receiveCallback->OnDecoderInfoChanged(decoder_info);
+}
+
+void VCMDecodedFrameCallback::Map(FrameInfo frameInfo) {
+ int dropped_frames = 0;
+ {
+ MutexLock lock(&lock_);
+ int initial_size = frame_infos_.size();
+ if (initial_size == kDecoderFrameMemoryLength) {
+ frame_infos_.pop_front();
+ dropped_frames = 1;
+ }
+ frame_infos_.push_back(std::move(frameInfo));
+ // If no frame is dropped, the new size should be `initial_size` + 1
+ }
+ if (dropped_frames > 0) {
+ _receiveCallback->OnDroppedFrames(dropped_frames);
+ }
+}
+
+void VCMDecodedFrameCallback::ClearTimestampMap() {
+ int dropped_frames = 0;
+ {
+ MutexLock lock(&lock_);
+ dropped_frames = frame_infos_.size();
+ frame_infos_.clear();
+ }
+ if (dropped_frames > 0) {
+ _receiveCallback->OnDroppedFrames(dropped_frames);
+ }
+}
+
+VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder)
+ : _callback(NULL),
+ decoder_(decoder),
+ _last_keyframe_content_type(VideoContentType::UNSPECIFIED) {
+ RTC_DCHECK(decoder_);
+}
+
+VCMGenericDecoder::~VCMGenericDecoder() {
+ decoder_->Release();
+}
+
+bool VCMGenericDecoder::Configure(const VideoDecoder::Settings& settings) {
+ TRACE_EVENT0("webrtc", "VCMGenericDecoder::Configure");
+
+ bool ok = decoder_->Configure(settings);
+ decoder_info_ = decoder_->GetDecoderInfo();
+ RTC_LOG(LS_INFO) << "Decoder implementation: " << decoder_info_.ToString();
+ if (_callback) {
+ _callback->OnDecoderInfoChanged(decoder_info_);
+ }
+ return ok;
+}
+
+int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) {
+ TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
+ frame.Timestamp());
+ FrameInfo frame_info;
+ frame_info.rtp_timestamp = frame.Timestamp();
+ frame_info.decode_start = now;
+ frame_info.render_time =
+ frame.RenderTimeMs() >= 0
+ ? absl::make_optional(Timestamp::Millis(frame.RenderTimeMs()))
+ : absl::nullopt;
+ frame_info.rotation = frame.rotation();
+ frame_info.timing = frame.video_timing();
+ frame_info.ntp_time_ms = frame.EncodedImage().ntp_time_ms_;
+ frame_info.packet_infos = frame.PacketInfos();
+
+  // The content type is set correctly only for key frames, so use the latest
+  // key frame's content type for delta frames. If the corresponding key frame
+  // was lost, decoding will fail and the content type will be ignored.
+ if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
+ frame_info.content_type = frame.contentType();
+ _last_keyframe_content_type = frame.contentType();
+ } else {
+ frame_info.content_type = _last_keyframe_content_type;
+ }
+ _callback->Map(std::move(frame_info));
+
+ int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
+ frame.RenderTimeMs());
+ VideoDecoder::DecoderInfo decoder_info = decoder_->GetDecoderInfo();
+ if (decoder_info != decoder_info_) {
+ RTC_LOG(LS_INFO) << "Changed decoder implementation to: "
+ << decoder_info.ToString();
+ decoder_info_ = decoder_info;
+ if (decoder_info.implementation_name.empty()) {
+ decoder_info.implementation_name = "unknown";
+ }
+ _callback->OnDecoderInfoChanged(std::move(decoder_info));
+ }
+ if (ret < WEBRTC_VIDEO_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.Timestamp() << ", error code: " << ret;
+ _callback->ClearTimestampMap();
+ } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT) {
+ // No output.
+ _callback->ClearTimestampMap();
+ }
+ return ret;
+}
+
+int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(
+ VCMDecodedFrameCallback* callback) {
+ _callback = callback;
+ int32_t ret = decoder_->RegisterDecodeCompleteCallback(callback);
+ if (callback && !decoder_info_.implementation_name.empty()) {
+ callback->OnDecoderInfoChanged(decoder_info_);
+ }
+ return ret;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/generic_decoder.h b/third_party/libwebrtc/modules/video_coding/generic_decoder.h
new file mode 100644
index 0000000000..7dc6d34c01
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/generic_decoder.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+#define MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+
+#include <cstdint>
+#include <deque>
+#include <string>
+#include <utility>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class VCMReceiveCallback;
+
+struct FrameInfo {
+ FrameInfo() = default;
+ FrameInfo(const FrameInfo&) = delete;
+ FrameInfo& operator=(const FrameInfo&) = delete;
+ FrameInfo(FrameInfo&&) = default;
+ FrameInfo& operator=(FrameInfo&&) = default;
+
+ uint32_t rtp_timestamp;
+  // This is likely not optional, but some inputs sometimes appear to be
+  // negative.
+ // TODO(bugs.webrtc.org/13756): See if this can be replaced with Timestamp
+ // once all inputs to this field use Timestamp instead of an integer.
+ absl::optional<Timestamp> render_time;
+ absl::optional<Timestamp> decode_start;
+ VideoRotation rotation;
+ VideoContentType content_type;
+ EncodedImage::Timing timing;
+ int64_t ntp_time_ms;
+ RtpPacketInfos packet_infos;
+ // ColorSpace is not stored here, as it might be modified by decoders.
+};
+
+class VCMDecodedFrameCallback : public DecodedImageCallback {
+ public:
+ VCMDecodedFrameCallback(VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials);
+ ~VCMDecodedFrameCallback() override;
+ void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
+ VCMReceiveCallback* UserReceiveCallback();
+
+ int32_t Decoded(VideoFrame& decodedImage) override;
+ int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
+ void Decoded(VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override;
+
+ void OnDecoderInfoChanged(const VideoDecoder::DecoderInfo& decoder_info);
+
+ void Map(FrameInfo frameInfo);
+ void ClearTimestampMap();
+
+ private:
+ std::pair<absl::optional<FrameInfo>, size_t> FindFrameInfo(
+ uint32_t rtp_timestamp) RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ SequenceChecker construction_thread_;
+ Clock* const _clock;
+  // This callback must be set before the decoder thread starts running
+  // and must only be unset when external threads (e.g. the decoder thread)
+  // have been stopped. Because of that, the variable can be regarded as
+  // const while more than one thread is involved: it is always set from
+  // the same thread, and therefore a lock is not required to access it.
+ VCMReceiveCallback* _receiveCallback = nullptr;
+ VCMTiming* _timing;
+ Mutex lock_;
+ std::deque<FrameInfo> frame_infos_ RTC_GUARDED_BY(lock_);
+ int64_t ntp_offset_;
+};
+
+class VCMGenericDecoder {
+ public:
+ explicit VCMGenericDecoder(VideoDecoder* decoder);
+ ~VCMGenericDecoder();
+
+ /**
+   * Initialize the decoder with the information from `settings`.
+ */
+ bool Configure(const VideoDecoder::Settings& settings);
+
+ /**
+   * Decode to a raw I420 frame.
+   *
+   * inputFrame - reference to the encoded video frame
+ */
+ int32_t Decode(const VCMEncodedFrame& inputFrame, Timestamp now);
+
+ /**
+ * Set decode callback. Deregistering while decoding is illegal.
+ */
+ int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
+
+ bool IsSameDecoder(VideoDecoder* decoder) const {
+ return decoder_ == decoder;
+ }
+
+ private:
+ VCMDecodedFrameCallback* _callback = nullptr;
+ VideoDecoder* const decoder_;
+ VideoContentType _last_keyframe_content_type;
+ VideoDecoder::DecoderInfo decoder_info_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc b/third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc
new file mode 100644
index 0000000000..68bc307e65
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/generic_decoder_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/generic_decoder.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/rtp_packet_infos.h"
+#include "api/video_codecs/video_decoder.h"
+#include "common_video/test/utilities.h"
+#include "modules/video_coding/timing/timing.h"
+#include "system_wrappers/include/clock.h"
+#include "test/fake_decoder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class ReceiveCallback : public VCMReceiveCallback {
+ public:
+ int32_t FrameToRender(VideoFrame& frame,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) override {
+ frames_.push_back(frame);
+ return 0;
+ }
+
+ absl::optional<VideoFrame> PopLastFrame() {
+ if (frames_.empty())
+ return absl::nullopt;
+ auto ret = frames_.front();
+ frames_.pop_back();
+ return ret;
+ }
+
+ rtc::ArrayView<const VideoFrame> GetAllFrames() const { return frames_; }
+
+ void OnDroppedFrames(uint32_t frames_dropped) {
+ frames_dropped_ += frames_dropped;
+ }
+
+ uint32_t frames_dropped() const { return frames_dropped_; }
+
+ private:
+ std::vector<VideoFrame> frames_;
+ uint32_t frames_dropped_ = 0;
+};
+
+class GenericDecoderTest : public ::testing::Test {
+ protected:
+ GenericDecoderTest()
+ : time_controller_(Timestamp::Zero()),
+ clock_(time_controller_.GetClock()),
+ timing_(time_controller_.GetClock(), field_trials_),
+ decoder_(time_controller_.GetTaskQueueFactory()),
+ vcm_callback_(&timing_, time_controller_.GetClock(), field_trials_),
+ generic_decoder_(&decoder_) {}
+
+ void SetUp() override {
+ generic_decoder_.RegisterDecodeCompleteCallback(&vcm_callback_);
+ vcm_callback_.SetUserReceiveCallback(&user_callback_);
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(kVideoCodecVP8);
+ settings.set_max_render_resolution({10, 10});
+ settings.set_number_of_cores(4);
+ generic_decoder_.Configure(settings);
+ }
+
+ GlobalSimulatedTimeController time_controller_;
+ Clock* const clock_;
+ test::ScopedKeyValueConfig field_trials_;
+ VCMTiming timing_;
+ webrtc::test::FakeDecoder decoder_;
+ VCMDecodedFrameCallback vcm_callback_;
+ VCMGenericDecoder generic_decoder_;
+ ReceiveCallback user_callback_;
+};
+
+TEST_F(GenericDecoderTest, PassesPacketInfos) {
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ VCMEncodedFrame encoded_frame;
+ encoded_frame.SetPacketInfos(packet_infos);
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_EQ(decoded_frame->packet_infos().size(), 3U);
+}
+
+TEST_F(GenericDecoderTest, FrameDroppedIfTooManyFramesInFlight) {
+ constexpr int kMaxFramesInFlight = 10;
+ decoder_.SetDelayedDecoding(10);
+ for (int i = 0; i < kMaxFramesInFlight + 1; ++i) {
+ VCMEncodedFrame encoded_frame;
+ encoded_frame.SetTimestamp(90000 * i);
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+
+ auto frames = user_callback_.GetAllFrames();
+ ASSERT_EQ(10U, frames.size());
+  // Expect that the first frame was dropped, since all decodes were released
+  // at the same time and the oldest frame info is the first one to be dropped.
+ EXPECT_EQ(frames[0].timestamp(), 90000u);
+ EXPECT_EQ(1u, user_callback_.frames_dropped());
+}
+
+TEST_F(GenericDecoderTest, PassesPacketInfosForDelayedDecoders) {
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ decoder_.SetDelayedDecoding(100);
+
+ {
+ // Ensure the original frame is destroyed before the decoding is completed.
+ VCMEncodedFrame encoded_frame;
+ encoded_frame.SetPacketInfos(packet_infos);
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(200));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_EQ(decoded_frame->packet_infos().size(), 3U);
+}
+
+TEST_F(GenericDecoderTest, MaxCompositionDelayNotSetByDefault) {
+ VCMEncodedFrame encoded_frame;
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_THAT(
+ decoded_frame->render_parameters().max_composition_delay_in_frames,
+ testing::Eq(absl::nullopt));
+}
+
+TEST_F(GenericDecoderTest, MaxCompositionDelayActivatedByPlayoutDelay) {
+ VCMEncodedFrame encoded_frame;
+ // VideoReceiveStream2 would set MaxCompositionDelayInFrames if playout delay
+ // is specified as X,Y, where X=0, Y>0.
+ constexpr int kMaxCompositionDelayInFrames = 3; // ~50 ms at 60 fps.
+ timing_.SetMaxCompositionDelayInFrames(
+ absl::make_optional(kMaxCompositionDelayInFrames));
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_THAT(
+ decoded_frame->render_parameters().max_composition_delay_in_frames,
+ testing::Optional(kMaxCompositionDelayInFrames));
+}
+
+TEST_F(GenericDecoderTest, IsLowLatencyStreamFalseByDefault) {
+ VCMEncodedFrame encoded_frame;
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_FALSE(decoded_frame->render_parameters().use_low_latency_rendering);
+}
+
+TEST_F(GenericDecoderTest, IsLowLatencyStreamActivatedByPlayoutDelay) {
+ VCMEncodedFrame encoded_frame;
+ const VideoPlayoutDelay kPlayoutDelay = {0, 50};
+ timing_.set_min_playout_delay(TimeDelta::Millis(kPlayoutDelay.min_ms));
+ timing_.set_max_playout_delay(TimeDelta::Millis(kPlayoutDelay.max_ms));
+ generic_decoder_.Decode(encoded_frame, clock_->CurrentTime());
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ absl::optional<VideoFrame> decoded_frame = user_callback_.PopLastFrame();
+ ASSERT_TRUE(decoded_frame.has_value());
+ EXPECT_TRUE(decoded_frame->render_parameters().use_low_latency_rendering);
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc
new file mode 100644
index 0000000000..6096665bda
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_packet_buffer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/video/video_frame_type.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+namespace {
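+// Euclidean remainder: the result is always in [0, div), also for negative
+// `n`, which makes it usable for indexing into the circular packet buffer.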
+int64_t EuclideanMod(int64_t n, int64_t div) {
+ RTC_DCHECK_GT(div, 0);
+ return (n %= div) < 0 ? n + div : n;
+}
+
+rtc::ArrayView<const NaluInfo> GetNaluInfos(
+ const RTPVideoHeaderH264& h264_header) {
+ if (h264_header.nalus_length > kMaxNalusPerPacket) {
+ return {};
+ }
+
+ return rtc::MakeArrayView(h264_header.nalus, h264_header.nalus_length);
+}
+
+bool IsFirstPacketOfFragment(const RTPVideoHeaderH264& h264_header) {
+ return h264_header.nalus_length > 0;
+}
+
+bool BeginningOfIdr(const H264PacketBuffer::Packet& packet) {
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ const bool contains_idr_nalu =
+ absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
+ return nalu_info.type == H264::NaluType::kIdr;
+ });
+ switch (h264_header.packetization_type) {
+ case kH264StapA:
+ case kH264SingleNalu: {
+ return contains_idr_nalu;
+ }
+ case kH264FuA: {
+ return contains_idr_nalu && IsFirstPacketOfFragment(h264_header);
+ }
+ }
+}
+
+bool HasSps(const H264PacketBuffer::Packet& packet) {
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ return absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
+ return nalu_info.type == H264::NaluType::kSps;
+ });
+}
+
+// TODO(bugs.webrtc.org/13157): Update the H264 depacketizer so we don't have to
+// fiddle with the payload at this point.
+rtc::CopyOnWriteBuffer FixVideoPayload(rtc::ArrayView<const uint8_t> payload,
+ const RTPVideoHeader& video_header) {
+ constexpr uint8_t kStartCode[] = {0, 0, 0, 1};
+
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(video_header.video_type_header);
+
+ rtc::CopyOnWriteBuffer result;
+ switch (h264_header.packetization_type) {
+ case kH264StapA: {
+ const uint8_t* payload_end = payload.data() + payload.size();
+ const uint8_t* nalu_ptr = payload.data() + 1;
+ while (nalu_ptr < payload_end - 1) {
+ // The first two bytes describe the length of the segment, where a
+ // segment is the nalu type plus nalu payload.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ if (nalu_ptr + segment_length <= payload_end) {
+ result.AppendData(kStartCode);
+ result.AppendData(nalu_ptr, segment_length);
+ }
+ nalu_ptr += segment_length;
+ }
+ return result;
+ }
+
+ case kH264FuA: {
+ if (IsFirstPacketOfFragment(h264_header)) {
+ result.AppendData(kStartCode);
+ }
+ result.AppendData(payload);
+ return result;
+ }
+
+ case kH264SingleNalu: {
+ result.AppendData(kStartCode);
+ result.AppendData(payload);
+ return result;
+ }
+ }
+
+ RTC_DCHECK_NOTREACHED();
+ return result;
+}
+
+} // namespace
+
+H264PacketBuffer::H264PacketBuffer(bool idr_only_keyframes_allowed)
+ : idr_only_keyframes_allowed_(idr_only_keyframes_allowed) {}
+
+H264PacketBuffer::InsertResult H264PacketBuffer::InsertPacket(
+ std::unique_ptr<Packet> packet) {
+ RTC_DCHECK(packet->video_header.codec == kVideoCodecH264);
+
+ InsertResult result;
+ if (!absl::holds_alternative<RTPVideoHeaderH264>(
+ packet->video_header.video_type_header)) {
+ return result;
+ }
+
+ int64_t unwrapped_seq_num = seq_num_unwrapper_.Unwrap(packet->seq_num);
+ auto& packet_slot = GetPacket(unwrapped_seq_num);
+ if (packet_slot != nullptr &&
+ AheadOrAt(packet_slot->timestamp, packet->timestamp)) {
+ // The incoming `packet` is old or a duplicate.
+ return result;
+ } else {
+ packet_slot = std::move(packet);
+ }
+
+ result.packets = FindFrames(unwrapped_seq_num);
+ return result;
+}
+
+std::unique_ptr<H264PacketBuffer::Packet>& H264PacketBuffer::GetPacket(
+ int64_t unwrapped_seq_num) {
+ return buffer_[EuclideanMod(unwrapped_seq_num, kBufferSize)];
+}
+
+bool H264PacketBuffer::BeginningOfStream(
+ const H264PacketBuffer::Packet& packet) const {
+ return HasSps(packet) ||
+ (idr_only_keyframes_allowed_ && BeginningOfIdr(packet));
+}
+
+std::vector<std::unique_ptr<H264PacketBuffer::Packet>>
+H264PacketBuffer::FindFrames(int64_t unwrapped_seq_num) {
+ std::vector<std::unique_ptr<Packet>> found_frames;
+
+ Packet* packet = GetPacket(unwrapped_seq_num).get();
+ RTC_CHECK(packet != nullptr);
+
+ // Check if the packet is continuous or the beginning of a new coded video
+ // sequence.
+ if (unwrapped_seq_num - 1 != last_continuous_unwrapped_seq_num_) {
+ if (unwrapped_seq_num <= last_continuous_unwrapped_seq_num_ ||
+ !BeginningOfStream(*packet)) {
+ return found_frames;
+ }
+
+ last_continuous_unwrapped_seq_num_ = unwrapped_seq_num;
+ }
+
+ for (int64_t seq_num = unwrapped_seq_num;
+ seq_num < unwrapped_seq_num + kBufferSize;) {
+ RTC_DCHECK_GE(seq_num, *last_continuous_unwrapped_seq_num_);
+
+    // Packets that were never assembled into a completed frame will stay in
+    // the `buffer_`. Check that the `packet` sequence number matches the
+    // expected unwrapped sequence number.
+ if (static_cast<uint16_t>(seq_num) != packet->seq_num) {
+ return found_frames;
+ }
+
+ last_continuous_unwrapped_seq_num_ = seq_num;
+ // Last packet of the frame, try to assemble the frame.
+ if (packet->marker_bit) {
+ uint32_t rtp_timestamp = packet->timestamp;
+
+ // Iterate backwards to find where the frame starts.
+ for (int64_t seq_num_start = seq_num;
+ seq_num_start > seq_num - kBufferSize; --seq_num_start) {
+ auto& prev_packet = GetPacket(seq_num_start - 1);
+
+ if (prev_packet == nullptr || prev_packet->timestamp != rtp_timestamp) {
+ if (MaybeAssembleFrame(seq_num_start, seq_num, found_frames)) {
+ // Frame was assembled, continue to look for more frames.
+ break;
+ } else {
+ // Frame was not assembled, no subsequent frame will be continuous.
+ return found_frames;
+ }
+ }
+ }
+ }
+
+ seq_num++;
+ packet = GetPacket(seq_num).get();
+ if (packet == nullptr) {
+ return found_frames;
+ }
+ }
+
+ return found_frames;
+}
+
+bool H264PacketBuffer::MaybeAssembleFrame(
+ int64_t start_seq_num_unwrapped,
+ int64_t end_sequence_number_unwrapped,
+ std::vector<std::unique_ptr<Packet>>& frames) {
+ bool has_sps = false;
+ bool has_pps = false;
+ bool has_idr = false;
+
+ int width = -1;
+ int height = -1;
+
+ for (int64_t seq_num = start_seq_num_unwrapped;
+ seq_num <= end_sequence_number_unwrapped; ++seq_num) {
+ const auto& packet = GetPacket(seq_num);
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
+ for (const auto& nalu : GetNaluInfos(h264_header)) {
+ has_idr |= nalu.type == H264::NaluType::kIdr;
+ has_sps |= nalu.type == H264::NaluType::kSps;
+ has_pps |= nalu.type == H264::NaluType::kPps;
+ }
+
+ width = std::max<int>(packet->video_header.width, width);
+ height = std::max<int>(packet->video_header.height, height);
+ }
+
+ if (has_idr) {
+ if (!idr_only_keyframes_allowed_ && (!has_sps || !has_pps)) {
+ return false;
+ }
+ }
+
+ for (int64_t seq_num = start_seq_num_unwrapped;
+ seq_num <= end_sequence_number_unwrapped; ++seq_num) {
+ auto& packet = GetPacket(seq_num);
+
+ packet->video_header.is_first_packet_in_frame =
+ (seq_num == start_seq_num_unwrapped);
+ packet->video_header.is_last_packet_in_frame =
+ (seq_num == end_sequence_number_unwrapped);
+
+ if (packet->video_header.is_first_packet_in_frame) {
+ if (width > 0 && height > 0) {
+ packet->video_header.width = width;
+ packet->video_header.height = height;
+ }
+
+ packet->video_header.frame_type = has_idr
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ }
+
+ packet->video_payload =
+ FixVideoPayload(packet->video_payload, packet->video_header);
+
+ frames.push_back(std::move(packet));
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h
new file mode 100644
index 0000000000..a72c240e82
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
+#define MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class H264PacketBuffer {
+ public:
+  // The H264PacketBuffer does the same job as the PacketBuffer, but for H264
+  // only. To make it fit in with the surrounding code, the PacketBuffer
+  // input/output classes are reused.
+ using Packet = video_coding::PacketBuffer::Packet;
+ using InsertResult = video_coding::PacketBuffer::InsertResult;
+
+ explicit H264PacketBuffer(bool idr_only_keyframes_allowed);
+
+ ABSL_MUST_USE_RESULT InsertResult
+ InsertPacket(std::unique_ptr<Packet> packet);
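+
+  // Example usage (sketch; building `packet` from RTP is elided):
+  //   H264PacketBuffer packet_buffer(/*idr_only_keyframes_allowed=*/false);
+  //   auto result = packet_buffer.InsertPacket(std::move(packet));
+  //   // result.packets holds the packets of all frames that became complete
+  //   // and continuous as a result of this insert, in decode order.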
+
+ private:
+ static constexpr int kBufferSize = 2048;
+
+ std::unique_ptr<Packet>& GetPacket(int64_t unwrapped_seq_num);
+ bool BeginningOfStream(const Packet& packet) const;
+ std::vector<std::unique_ptr<Packet>> FindFrames(int64_t unwrapped_seq_num);
+ bool MaybeAssembleFrame(int64_t start_seq_num_unwrapped,
+ int64_t end_sequence_number_unwrapped,
+ std::vector<std::unique_ptr<Packet>>& packets);
+
+ const bool idr_only_keyframes_allowed_;
+ std::array<std::unique_ptr<Packet>, kBufferSize> buffer_;
+ absl::optional<int64_t> last_continuous_unwrapped_seq_num_;
+ SeqNumUnwrapper<uint16_t> seq_num_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc
new file mode 100644
index 0000000000..4f2331da28
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/h264_packet_buffer.h"
+
+#include <cstring>
+#include <limits>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/video/render_resolution.h"
+#include "common_video/h264/h264_common.h"
+#include "rtc_base/system/unused.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+using H264::NaluType::kAud;
+using H264::NaluType::kFuA;
+using H264::NaluType::kIdr;
+using H264::NaluType::kPps;
+using H264::NaluType::kSlice;
+using H264::NaluType::kSps;
+using H264::NaluType::kStapA;
+
+constexpr int kBufferSize = 2048;
+
+std::vector<uint8_t> StartCode() {
+ return {0, 0, 0, 1};
+}
+
+NaluInfo MakeNaluInfo(uint8_t type) {
+ NaluInfo res;
+ res.type = type;
+ res.sps_id = -1;
+ res.pps_id = -1;
+ return res;
+}
+
+class Packet {
+ public:
+ explicit Packet(H264PacketizationTypes type);
+
+ Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Slice(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9});
+ Packet& Aud();
+ Packet& Marker();
+ Packet& AsFirstFragment();
+ Packet& Time(uint32_t rtp_timestamp);
+ Packet& SeqNum(uint16_t rtp_seq_num);
+
+ std::unique_ptr<H264PacketBuffer::Packet> Build();
+
+ private:
+ rtc::CopyOnWriteBuffer BuildFuaPayload() const;
+ rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const;
+ rtc::CopyOnWriteBuffer BuildStapAPayload() const;
+
+ RTPVideoHeaderH264& H264Header() {
+ return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
+ }
+ const RTPVideoHeaderH264& H264Header() const {
+ return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
+ }
+
+ H264PacketizationTypes type_;
+ RTPVideoHeader video_header_;
+ bool first_fragment_ = false;
+ bool marker_bit_ = false;
+ uint32_t rtp_timestamp_ = 0;
+ uint16_t rtp_seq_num_ = 0;
+ std::vector<std::vector<uint8_t>> nalu_payloads_;
+};
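+
+// Example (sketch): the builder chain
+//   Packet(kH264StapA).Sps().Pps().Idr().SeqNum(0).Time(0).Marker().Build()
+// produces a single STAP-A packet carrying SPS, PPS and IDR NALUs, the
+// pattern used throughout the tests below.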
+
+Packet::Packet(H264PacketizationTypes type) : type_(type) {
+ video_header_.video_type_header.emplace<RTPVideoHeaderH264>();
+}
+
+Packet& Packet::Idr(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kIdr);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Slice(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSlice);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Sps(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
+ video_header_.width = resolution.Width();
+ video_header_.height = resolution.Height();
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Pps(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kPps);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+Packet& Packet::Aud() {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kAud);
+ nalu_payloads_.push_back({});
+ return *this;
+}
+
+Packet& Packet::Marker() {
+ marker_bit_ = true;
+ return *this;
+}
+
+Packet& Packet::AsFirstFragment() {
+ first_fragment_ = true;
+ return *this;
+}
+
+Packet& Packet::Time(uint32_t rtp_timestamp) {
+ rtp_timestamp_ = rtp_timestamp;
+ return *this;
+}
+
+Packet& Packet::SeqNum(uint16_t rtp_seq_num) {
+ rtp_seq_num_ = rtp_seq_num;
+ return *this;
+}
+
+std::unique_ptr<H264PacketBuffer::Packet> Packet::Build() {
+ auto res = std::make_unique<H264PacketBuffer::Packet>();
+
+ auto& h264_header = H264Header();
+ switch (type_) {
+ case kH264FuA: {
+ RTC_CHECK_EQ(h264_header.nalus_length, 1);
+ res->video_payload = BuildFuaPayload();
+ break;
+ }
+ case kH264SingleNalu: {
+ RTC_CHECK_EQ(h264_header.nalus_length, 1);
+ res->video_payload = BuildSingleNaluPayload();
+ break;
+ }
+ case kH264StapA: {
+ RTC_CHECK_GT(h264_header.nalus_length, 1);
+ RTC_CHECK_LE(h264_header.nalus_length, kMaxNalusPerPacket);
+ res->video_payload = BuildStapAPayload();
+ break;
+ }
+ }
+
+ if (type_ == kH264FuA && !first_fragment_) {
+ h264_header.nalus_length = 0;
+ }
+
+ h264_header.packetization_type = type_;
+ res->marker_bit = marker_bit_;
+ res->video_header = video_header_;
+ res->timestamp = rtp_timestamp_;
+ res->seq_num = rtp_seq_num_;
+ res->video_header.codec = kVideoCodecH264;
+
+ return res;
+}
+
+rtc::CopyOnWriteBuffer Packet::BuildFuaPayload() const {
+ return rtc::CopyOnWriteBuffer(nalu_payloads_[0]);
+}
+
+rtc::CopyOnWriteBuffer Packet::BuildSingleNaluPayload() const {
+ rtc::CopyOnWriteBuffer res;
+ auto& h264_header = H264Header();
+ res.AppendData(&h264_header.nalus[0].type, 1);
+ res.AppendData(nalu_payloads_[0]);
+ return res;
+}
+
+rtc::CopyOnWriteBuffer Packet::BuildStapAPayload() const {
+ rtc::CopyOnWriteBuffer res;
+
+ const uint8_t indicator = H264::NaluType::kStapA;
+ res.AppendData(&indicator, 1);
+
+ auto& h264_header = H264Header();
+ for (size_t i = 0; i < h264_header.nalus_length; ++i) {
+    // The first two bytes indicate the NALU segment size.
+ uint8_t length_as_array[2] = {
+ 0, static_cast<uint8_t>(nalu_payloads_[i].size() + 1)};
+ res.AppendData(length_as_array);
+
+ res.AppendData(&h264_header.nalus[i].type, 1);
+ res.AppendData(nalu_payloads_[i]);
+ }
+ return res;
+}
+
+rtc::ArrayView<const uint8_t> PacketPayload(
+ const std::unique_ptr<H264PacketBuffer::Packet>& packet) {
+ return packet->video_payload;
+}
+
+std::vector<uint8_t> FlatVector(
+ const std::vector<std::vector<uint8_t>>& elems) {
+ std::vector<uint8_t> res;
+ for (const auto& elem : elems) {
+ res.insert(res.end(), elem.begin(), elem.end());
+ }
+ return res;
+}
+
+TEST(H264PacketBufferTest, IdrIsKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, IdrIsNotKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, IdrIsKeyframeFuaRequiresFirstFragment) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true);
+
+ // Not marked as the first fragment
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ // Marked as first fragment
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .AsFirstFragment()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(3).Time(1).Marker().Build())
+ .packets,
+ SizeIs(2));
+}
+
+TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeSingleNalus) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Pps().SeqNum(1).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(2).Time(0).Marker().Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeSingleNalus) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Pps().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeSingleNalus) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeStapA) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeStapA) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264StapA).Pps().Idr().SeqNum(0).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeStapA) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264StapA).Sps().Idr().SeqNum(2).Time(2).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(3)
+ .Time(3)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, InsertingSpsPpsLastCompletesKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Idr().SeqNum(2).Time(1).Marker().Build()));
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264StapA).Sps().Pps().SeqNum(1).Time(1).Build())
+ .packets,
+ SizeIs(2));
+}
+
+TEST(H264PacketBufferTest, InsertingMidFuaCompletesFrame) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(1).Time(1).AsFirstFragment().Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(3).Time(1).Marker().Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(1).Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, SeqNumJumpDoesNotCompleteFrame) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Slice().SeqNum(1).Time(1).Build())
+ .packets,
+ IsEmpty());
+
+ // Add `kBufferSize` to make the index of the sequence number wrap and end up
+ // where the packet with sequence number 2 would have ended up.
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(2 + kBufferSize)
+ .Time(3)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+}
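+
+// Note: with kBufferSize = 2048, sequence numbers 2 and 2 + kBufferSize map
+// to the same slot in the packet buffer (the slot index is computed modulo
+// kBufferSize), which is the collision the test above exercises.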
+
+TEST(H264PacketBufferTest, OldFramesAreNotCompletedAfterBufferWrap) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264SingleNalu)
+ .Slice()
+ .SeqNum(1)
+ .Time(1)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+  // New keyframe; precedes the packet with sequence number 1 in the buffer.
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, OldPacketsDontBlockNewPackets) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 1)
+ .Time(kBufferSize + 1)
+ .AsFirstFragment()
+ .Build()));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 3)
+ .Time(kBufferSize + 1)
+ .Marker()
+ .Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 2)
+ .Time(kBufferSize + 1)
+ .Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, OldPacketDoesntCompleteFrame) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 3)
+ .Time(kBufferSize + 1)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(2).Time(2).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 1)
+ .Time(kBufferSize + 1)
+ .AsFirstFragment()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H264PacketBufferTest, FrameBoundariesAreSet) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ auto key = packet_buffer.InsertPacket(
+ Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build());
+
+ ASSERT_THAT(key.packets, SizeIs(1));
+ EXPECT_TRUE(key.packets[0]->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(key.packets[0]->video_header.is_last_packet_in_frame);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(3).Time(2).Build()));
+ auto delta = packet_buffer.InsertPacket(
+ Packet(kH264FuA).Slice().SeqNum(4).Time(2).Marker().Build());
+
+ ASSERT_THAT(delta.packets, SizeIs(3));
+ EXPECT_TRUE(delta.packets[0]->video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(delta.packets[0]->video_header.is_last_packet_in_frame);
+
+ EXPECT_FALSE(delta.packets[1]->video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(delta.packets[1]->video_header.is_last_packet_in_frame);
+
+ EXPECT_FALSE(delta.packets[2]->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(delta.packets[2]->video_header.is_last_packet_in_frame);
+}
+
+TEST(H264PacketBufferTest, ResolutionSetOnFirstPacket) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
+ auto res = packet_buffer.InsertPacket(Packet(kH264StapA)
+ .SpsWithResolution({320, 240})
+ .Pps()
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .Marker()
+ .Build());
+
+ ASSERT_THAT(res.packets, SizeIs(2));
+ EXPECT_THAT(res.packets[0]->video_header.width, Eq(320));
+ EXPECT_THAT(res.packets[0]->video_header.height, Eq(240));
+}
+
+TEST(H264PacketBufferTest, KeyframeAndDeltaFrameSetOnFirstPacket) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
+ auto key = packet_buffer.InsertPacket(
+ Packet(kH264StapA).Sps().Pps().Idr().SeqNum(2).Time(1).Marker().Build());
+
+ auto delta = packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Slice().SeqNum(3).Time(2).Marker().Build());
+
+ ASSERT_THAT(key.packets, SizeIs(2));
+ EXPECT_THAT(key.packets[0]->video_header.frame_type,
+ Eq(VideoFrameType::kVideoFrameKey));
+ ASSERT_THAT(delta.packets, SizeIs(1));
+ EXPECT_THAT(delta.packets[0]->video_header.frame_type,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(H264PacketBufferTest, RtpSeqNumWrap) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264StapA).Sps().Pps().SeqNum(0xffff).Time(0).Build()));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H264PacketBufferTest, StapAFixedBitstream) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ auto packets = packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps({1, 2, 3})
+ .Pps({4, 5, 6})
+ .Idr({7, 8, 9})
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(1));
+ EXPECT_THAT(PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector({StartCode(),
+ {kSps, 1, 2, 3},
+ StartCode(),
+ {kPps, 4, 5, 6},
+ StartCode(),
+ {kIdr, 7, 8, 9}})));
+}
+
+TEST(H264PacketBufferTest, SingleNaluFixedBitstream) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Sps({1, 2, 3}).SeqNum(0).Time(0).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ Packet(kH264SingleNalu).Pps({4, 5, 6}).SeqNum(1).Time(0).Build()));
+ auto packets = packet_buffer
+ .InsertPacket(Packet(kH264SingleNalu)
+ .Idr({7, 8, 9})
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector({StartCode(), {kSps, 1, 2, 3}})));
+ EXPECT_THAT(PacketPayload(packets[1]),
+ ElementsAreArray(FlatVector({StartCode(), {kPps, 4, 5, 6}})));
+ EXPECT_THAT(PacketPayload(packets[2]),
+ ElementsAreArray(FlatVector({StartCode(), {kIdr, 7, 8, 9}})));
+}
+
+TEST(H264PacketBufferTest, StapaAndFuaFixedBitstream) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264StapA)
+ .Sps({1, 2, 3})
+ .Pps({4, 5, 6})
+ .SeqNum(0)
+ .Time(0)
+ .Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
+ .Idr({8, 8, 8})
+ .SeqNum(1)
+ .Time(0)
+ .AsFirstFragment()
+ .Build()));
+ auto packets = packet_buffer
+ .InsertPacket(Packet(kH264FuA)
+ .Idr({9, 9, 9})
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(
+ PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector(
+ {StartCode(), {kSps, 1, 2, 3}, StartCode(), {kPps, 4, 5, 6}})));
+ EXPECT_THAT(PacketPayload(packets[1]),
+ ElementsAreArray(FlatVector({StartCode(), {8, 8, 8}})));
+ // Third is a continuation of second, so only the payload is expected.
+ EXPECT_THAT(PacketPayload(packets[2]),
+ ElementsAreArray(FlatVector({{9, 9, 9}})));
+}
+
+TEST(H264PacketBufferTest, FullPacketBufferDoesNotBlockKeyframe) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ for (int i = 0; i < kBufferSize; ++i) {
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ Packet(kH264SingleNalu).Slice().SeqNum(i).Time(0).Build())
+ .packets,
+ IsEmpty());
+ }
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(1)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H264PacketBufferTest, TooManyNalusInPacket) {
+ H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ std::unique_ptr<H264PacketBuffer::Packet> packet(
+ Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build());
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
+ h264_header.nalus_length = kMaxNalusPerPacket + 1;
+
+ EXPECT_THAT(packet_buffer.InsertPacket(std::move(packet)).packets, IsEmpty());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc
new file mode 100644
index 0000000000..a64f8885da
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/third_party/base64/base64.h"
+
+namespace {
+
+bool DecodeAndConvert(const std::string& base64, std::vector<uint8_t>* binary) {
+ return rtc::Base64::DecodeFromArray(base64.data(), base64.size(),
+ rtc::Base64::DO_STRICT, binary, nullptr);
+}
+} // namespace
+
+namespace webrtc {
+
+bool H264SpropParameterSets::DecodeSprop(const std::string& sprop) {
+ size_t separator_pos = sprop.find(',');
+ RTC_LOG(LS_INFO) << "Parsing sprop \"" << sprop << "\"";
+ if ((separator_pos <= 0) || (separator_pos >= sprop.length() - 1)) {
+ RTC_LOG(LS_WARNING) << "Invalid seperator position " << separator_pos
+ << " *" << sprop << "*";
+ return false;
+ }
+ std::string sps_str = sprop.substr(0, separator_pos);
+ std::string pps_str = sprop.substr(separator_pos + 1, std::string::npos);
+ if (!DecodeAndConvert(sps_str, &sps_)) {
+ RTC_LOG(LS_WARNING) << "Failed to decode sprop/sps *" << sprop << "*";
+ return false;
+ }
+ if (!DecodeAndConvert(pps_str, &pps_)) {
+ RTC_LOG(LS_WARNING) << "Failed to decode sprop/pps *" << sprop << "*";
+ return false;
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h
new file mode 100644
index 0000000000..8a32f31cc0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H264_SPROP_PARAMETER_SETS_H_
+#define MODULES_VIDEO_CODING_H264_SPROP_PARAMETER_SETS_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace webrtc {
+
+class H264SpropParameterSets {
+ public:
+ H264SpropParameterSets() {}
+
+ H264SpropParameterSets(const H264SpropParameterSets&) = delete;
+ H264SpropParameterSets& operator=(const H264SpropParameterSets&) = delete;
+
+ bool DecodeSprop(const std::string& sprop);
+ const std::vector<uint8_t>& sps_nalu() { return sps_; }
+ const std::vector<uint8_t>& pps_nalu() { return pps_; }
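+
+  // Example usage (sketch), with the RFC 3984 sample string from the unit
+  // tests:
+  //   H264SpropParameterSets sprop;
+  //   if (sprop.DecodeSprop("Z0IACpZTBYmI,aMljiA==")) {
+  //     const std::vector<uint8_t>& sps = sprop.sps_nalu();
+  //     const std::vector<uint8_t>& pps = sprop.pps_nalu();
+  //   }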
+
+ private:
+ std::vector<uint8_t> sps_;
+ std::vector<uint8_t> pps_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H264_SPROP_PARAMETER_SETS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
new file mode 100644
index 0000000000..ae263131a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class H264SpropParameterSetsTest : public ::testing::Test {
+ public:
+ H264SpropParameterSets h264_sprop;
+};
+
+TEST_F(H264SpropParameterSetsTest, Base64DecodeSprop) {
+ // Example sprop string from https://tools.ietf.org/html/rfc3984 .
+ EXPECT_TRUE(h264_sprop.DecodeSprop("Z0IACpZTBYmI,aMljiA=="));
+ static const std::vector<uint8_t> raw_sps{0x67, 0x42, 0x00, 0x0A, 0x96,
+ 0x53, 0x05, 0x89, 0x88};
+ static const std::vector<uint8_t> raw_pps{0x68, 0xC9, 0x63, 0x88};
+ EXPECT_EQ(raw_sps, h264_sprop.sps_nalu());
+ EXPECT_EQ(raw_pps, h264_sprop.pps_nalu());
+}
+
+TEST_F(H264SpropParameterSetsTest, InvalidData) {
+ EXPECT_FALSE(h264_sprop.DecodeSprop(","));
+ EXPECT_FALSE(h264_sprop.DecodeSprop(""));
+ EXPECT_FALSE(h264_sprop.DecodeSprop(",iA=="));
+ EXPECT_FALSE(h264_sprop.DecodeSprop("iA==,"));
+ EXPECT_TRUE(h264_sprop.DecodeSprop("iA==,iA=="));
+ EXPECT_FALSE(h264_sprop.DecodeSprop("--,--"));
+ EXPECT_FALSE(h264_sprop.DecodeSprop(",,"));
+ EXPECT_FALSE(h264_sprop.DecodeSprop("iA=="));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc
new file mode 100644
index 0000000000..0741a261e0
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/types/variant.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/h264/pps_parser.h"
+#include "common_video/h264/sps_parser.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace video_coding {
+
+namespace {
+const uint8_t start_code_h264[] = {0, 0, 0, 1};
+} // namespace
+
+H264SpsPpsTracker::H264SpsPpsTracker() = default;
+H264SpsPpsTracker::~H264SpsPpsTracker() = default;
+
+H264SpsPpsTracker::PpsInfo::PpsInfo() = default;
+H264SpsPpsTracker::PpsInfo::PpsInfo(PpsInfo&& rhs) = default;
+H264SpsPpsTracker::PpsInfo& H264SpsPpsTracker::PpsInfo::operator=(
+ PpsInfo&& rhs) = default;
+H264SpsPpsTracker::PpsInfo::~PpsInfo() = default;
+
+H264SpsPpsTracker::SpsInfo::SpsInfo() = default;
+H264SpsPpsTracker::SpsInfo::SpsInfo(SpsInfo&& rhs) = default;
+H264SpsPpsTracker::SpsInfo& H264SpsPpsTracker::SpsInfo::operator=(
+ SpsInfo&& rhs) = default;
+H264SpsPpsTracker::SpsInfo::~SpsInfo() = default;
+
+H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream(
+ rtc::ArrayView<const uint8_t> bitstream,
+ RTPVideoHeader* video_header) {
+ RTC_DCHECK(video_header);
+ RTC_DCHECK(video_header->codec == kVideoCodecH264);
+ RTC_DCHECK_GT(bitstream.size(), 0);
+
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+
+ bool append_sps_pps = false;
+ auto sps = sps_data_.end();
+ auto pps = pps_data_.end();
+
+ for (size_t i = 0; i < h264_header.nalus_length; ++i) {
+ const NaluInfo& nalu = h264_header.nalus[i];
+ switch (nalu.type) {
+ case H264::NaluType::kSps: {
+ SpsInfo& sps_info = sps_data_[nalu.sps_id];
+ sps_info.width = video_header->width;
+ sps_info.height = video_header->height;
+ break;
+ }
+ case H264::NaluType::kPps: {
+ pps_data_[nalu.pps_id].sps_id = nalu.sps_id;
+ break;
+ }
+ case H264::NaluType::kIdr: {
+ // If this is the first packet of an IDR, make sure we have the required
+ // SPS/PPS and also calculate how much extra space we need in the buffer
+ // to prepend the SPS/PPS to the bitstream with start codes.
+ if (video_header->is_first_packet_in_frame) {
+ if (nalu.pps_id == -1) {
+ RTC_LOG(LS_WARNING) << "No PPS id in IDR nalu.";
+ return {kRequestKeyframe};
+ }
+
+ pps = pps_data_.find(nalu.pps_id);
+ if (pps == pps_data_.end()) {
+ RTC_LOG(LS_WARNING)
+ << "No PPS with id << " << nalu.pps_id << " received";
+ return {kRequestKeyframe};
+ }
+
+ sps = sps_data_.find(pps->second.sps_id);
+ if (sps == sps_data_.end()) {
+ RTC_LOG(LS_WARNING)
+ << "No SPS with id << " << pps->second.sps_id << " received";
+ return {kRequestKeyframe};
+ }
+
+          // The first packet of every keyframe should have its width and
+          // height set, so set them here in case the resolution was supplied
+          // out of band.
+ video_header->width = sps->second.width;
+ video_header->height = sps->second.height;
+
+ // If the SPS/PPS was supplied out of band then we will have saved
+ // the actual bitstream in `data`.
+ if (sps->second.data && pps->second.data) {
+ RTC_DCHECK_GT(sps->second.size, 0);
+ RTC_DCHECK_GT(pps->second.size, 0);
+ append_sps_pps = true;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ RTC_CHECK(!append_sps_pps ||
+ (sps != sps_data_.end() && pps != pps_data_.end()));
+
+ // Calculate how much space we need for the rest of the bitstream.
+ size_t required_size = 0;
+
+ if (append_sps_pps) {
+ required_size += sps->second.size + sizeof(start_code_h264);
+ required_size += pps->second.size + sizeof(start_code_h264);
+ }
+
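+  // A STAP-A payload is laid out as
+  //   [indicator][len_hi][len_lo][NALU][len_hi][len_lo][NALU]...
+  // where each segment is preceded by a two-byte big-endian length
+  // (RFC 6184); the loop below walks these segments.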
+ if (h264_header.packetization_type == kH264StapA) {
+ const uint8_t* nalu_ptr = bitstream.data() + 1;
+ while (nalu_ptr < bitstream.data() + bitstream.size() - 1) {
+ RTC_DCHECK(video_header->is_first_packet_in_frame);
+ required_size += sizeof(start_code_h264);
+
+ // The first two bytes describe the length of a segment.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ required_size += segment_length;
+ nalu_ptr += segment_length;
+ }
+ } else {
+ if (h264_header.nalus_length > 0) {
+ required_size += sizeof(start_code_h264);
+ }
+ required_size += bitstream.size();
+ }
+
+ // Then we copy to the new buffer.
+ H264SpsPpsTracker::FixedBitstream fixed;
+ fixed.bitstream.EnsureCapacity(required_size);
+
+ if (append_sps_pps) {
+ // Insert SPS.
+ fixed.bitstream.AppendData(start_code_h264);
+ fixed.bitstream.AppendData(sps->second.data.get(), sps->second.size);
+
+ // Insert PPS.
+ fixed.bitstream.AppendData(start_code_h264);
+ fixed.bitstream.AppendData(pps->second.data.get(), pps->second.size);
+
+ // Update codec header to reflect the newly added SPS and PPS.
+ NaluInfo sps_info;
+ sps_info.type = H264::NaluType::kSps;
+ sps_info.sps_id = sps->first;
+ sps_info.pps_id = -1;
+ NaluInfo pps_info;
+ pps_info.type = H264::NaluType::kPps;
+ pps_info.sps_id = sps->first;
+ pps_info.pps_id = pps->first;
+ if (h264_header.nalus_length + 2 <= kMaxNalusPerPacket) {
+ h264_header.nalus[h264_header.nalus_length++] = sps_info;
+ h264_header.nalus[h264_header.nalus_length++] = pps_info;
+ } else {
+ RTC_LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
+ "SPS/PPS provided out-of-band.";
+ }
+ }
+
+ // Copy the rest of the bitstream and insert start codes.
+ if (h264_header.packetization_type == kH264StapA) {
+ const uint8_t* nalu_ptr = bitstream.data() + 1;
+ while (nalu_ptr < bitstream.data() + bitstream.size() - 1) {
+ fixed.bitstream.AppendData(start_code_h264);
+
+ // The first two bytes describe the length of a segment.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ size_t copy_end = nalu_ptr - bitstream.data() + segment_length;
+ if (copy_end > bitstream.size()) {
+ return {kDrop};
+ }
+
+ fixed.bitstream.AppendData(nalu_ptr, segment_length);
+ nalu_ptr += segment_length;
+ }
+ } else {
+ if (h264_header.nalus_length > 0) {
+ fixed.bitstream.AppendData(start_code_h264);
+ }
+ fixed.bitstream.AppendData(bitstream.data(), bitstream.size());
+ }
+
+ fixed.action = kInsert;
+ return fixed;
+}
+
+void H264SpsPpsTracker::InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
+ const std::vector<uint8_t>& pps) {
+ constexpr size_t kNaluHeaderOffset = 1;
+ if (sps.size() < kNaluHeaderOffset) {
+ RTC_LOG(LS_WARNING) << "SPS size " << sps.size() << " is smaller than "
+ << kNaluHeaderOffset;
+ return;
+ }
+ if ((sps[0] & 0x1f) != H264::NaluType::kSps) {
+ RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
+ return;
+ }
+ if (pps.size() < kNaluHeaderOffset) {
+ RTC_LOG(LS_WARNING) << "PPS size " << pps.size() << " is smaller than "
+ << kNaluHeaderOffset;
+ return;
+ }
+ if ((pps[0] & 0x1f) != H264::NaluType::kPps) {
+ RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
+ return;
+ }
+ absl::optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
+ sps.data() + kNaluHeaderOffset, sps.size() - kNaluHeaderOffset);
+ absl::optional<PpsParser::PpsState> parsed_pps = PpsParser::ParsePps(
+ pps.data() + kNaluHeaderOffset, pps.size() - kNaluHeaderOffset);
+
+ if (!parsed_sps) {
+ RTC_LOG(LS_WARNING) << "Failed to parse SPS.";
+ }
+
+ if (!parsed_pps) {
+ RTC_LOG(LS_WARNING) << "Failed to parse PPS.";
+ }
+
+ if (!parsed_pps || !parsed_sps) {
+ return;
+ }
+
+ SpsInfo sps_info;
+ sps_info.size = sps.size();
+ sps_info.width = parsed_sps->width;
+ sps_info.height = parsed_sps->height;
+ uint8_t* sps_data = new uint8_t[sps_info.size];
+ memcpy(sps_data, sps.data(), sps_info.size);
+ sps_info.data.reset(sps_data);
+ sps_data_[parsed_sps->id] = std::move(sps_info);
+
+ PpsInfo pps_info;
+ pps_info.size = pps.size();
+ pps_info.sps_id = parsed_pps->sps_id;
+ uint8_t* pps_data = new uint8_t[pps_info.size];
+ memcpy(pps_data, pps.data(), pps_info.size);
+ pps_info.data.reset(pps_data);
+ pps_data_[parsed_pps->id] = std::move(pps_info);
+
+ RTC_LOG(LS_INFO) << "Inserted SPS id " << parsed_sps->id << " and PPS id "
+ << parsed_pps->id << " (referencing SPS "
+ << parsed_pps->sps_id << ")";
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h
new file mode 100644
index 0000000000..600e2ee397
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H264_SPS_PPS_TRACKER_H_
+#define MODULES_VIDEO_CODING_H264_SPS_PPS_TRACKER_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class H264SpsPpsTracker {
+ public:
+ enum PacketAction { kInsert, kDrop, kRequestKeyframe };
+ struct FixedBitstream {
+ PacketAction action;
+ rtc::CopyOnWriteBuffer bitstream;
+ };
+
+ H264SpsPpsTracker();
+ ~H264SpsPpsTracker();
+
+ // Returns fixed bitstream and modifies `video_header`.
+ FixedBitstream CopyAndFixBitstream(rtc::ArrayView<const uint8_t> bitstream,
+ RTPVideoHeader* video_header);
+
+ void InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
+ const std::vector<uint8_t>& pps);
+
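+  // Example usage (sketch; `payload` and `video_header` come from a received
+  // RTP packet):
+  //   H264SpsPpsTracker tracker;
+  //   tracker.InsertSpsPpsNalus(sps, pps);  // Optional, for out-of-band sets.
+  //   H264SpsPpsTracker::FixedBitstream fixed =
+  //       tracker.CopyAndFixBitstream(payload, &video_header);
+  //   // On kInsert use fixed.bitstream; on kRequestKeyframe ask the sender
+  //   // for a new keyframe; on kDrop discard the packet.
+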
+ private:
+ struct PpsInfo {
+ PpsInfo();
+ PpsInfo(PpsInfo&& rhs);
+ PpsInfo& operator=(PpsInfo&& rhs);
+ ~PpsInfo();
+
+ int sps_id = -1;
+ size_t size = 0;
+ std::unique_ptr<uint8_t[]> data;
+ };
+
+ struct SpsInfo {
+ SpsInfo();
+ SpsInfo(SpsInfo&& rhs);
+ SpsInfo& operator=(SpsInfo&& rhs);
+ ~SpsInfo();
+
+ size_t size = 0;
+ int width = -1;
+ int height = -1;
+ std::unique_ptr<uint8_t[]> data;
+ };
+
+ std::map<uint32_t, PpsInfo> pps_data_;
+ std::map<uint32_t, SpsInfo> sps_data_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H264_SPS_PPS_TRACKER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc
new file mode 100644
index 0000000000..04abb75e4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker_unittest.cc
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+
+#include <string.h>
+
+#include <vector>
+
+#include "absl/types/variant.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/packet.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+const uint8_t start_code[] = {0, 0, 0, 1};
+
+rtc::ArrayView<const uint8_t> Bitstream(
+ const H264SpsPpsTracker::FixedBitstream& fixed) {
+ return fixed.bitstream;
+}
+
+void ExpectSpsPpsIdr(const RTPVideoHeaderH264& codec_header,
+ uint8_t sps_id,
+ uint8_t pps_id) {
+ bool contains_sps = false;
+ bool contains_pps = false;
+ bool contains_idr = false;
+ for (const auto& nalu : codec_header.nalus) {
+ if (nalu.type == H264::NaluType::kSps) {
+ EXPECT_EQ(sps_id, nalu.sps_id);
+ contains_sps = true;
+ } else if (nalu.type == H264::NaluType::kPps) {
+ EXPECT_EQ(sps_id, nalu.sps_id);
+ EXPECT_EQ(pps_id, nalu.pps_id);
+ contains_pps = true;
+ } else if (nalu.type == H264::NaluType::kIdr) {
+ EXPECT_EQ(pps_id, nalu.pps_id);
+ contains_idr = true;
+ }
+ }
+ EXPECT_TRUE(contains_sps);
+ EXPECT_TRUE(contains_pps);
+ EXPECT_TRUE(contains_idr);
+}
+
+class H264VideoHeader : public RTPVideoHeader {
+ public:
+ H264VideoHeader() {
+ codec = kVideoCodecH264;
+ is_first_packet_in_frame = false;
+ auto& h264_header = video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = 0;
+ h264_header.packetization_type = kH264SingleNalu;
+ }
+
+ RTPVideoHeaderH264& h264() {
+ return absl::get<RTPVideoHeaderH264>(video_type_header);
+ }
+};
+
+} // namespace
+
+class TestH264SpsPpsTracker : public ::testing::Test {
+ public:
+ void AddSps(H264VideoHeader* header,
+ uint8_t sps_id,
+ std::vector<uint8_t>* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kSps;
+ info.sps_id = sps_id;
+ info.pps_id = -1;
+ data->push_back(H264::NaluType::kSps);
+ data->push_back(sps_id); // The sps data, just a single byte.
+
+ header->h264().nalus[header->h264().nalus_length++] = info;
+ }
+
+ void AddPps(H264VideoHeader* header,
+ uint8_t sps_id,
+ uint8_t pps_id,
+ std::vector<uint8_t>* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kPps;
+ info.sps_id = sps_id;
+ info.pps_id = pps_id;
+ data->push_back(H264::NaluType::kPps);
+ data->push_back(pps_id); // The pps data, just a single byte.
+
+ header->h264().nalus[header->h264().nalus_length++] = info;
+ }
+
+ void AddIdr(H264VideoHeader* header, int pps_id) {
+ NaluInfo info;
+ info.type = H264::NaluType::kIdr;
+ info.sps_id = -1;
+ info.pps_id = pps_id;
+
+ header->h264().nalus[header->h264().nalus_length++] = info;
+ }
+
+ protected:
+ H264SpsPpsTracker tracker_;
+};
+
+TEST_F(TestH264SpsPpsTracker, NoNalus) {
+ uint8_t data[] = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264FuA;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(data));
+}
+
+TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
+ uint8_t data[] = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264FuA;
+ header.h264().nalus_length = 1;
+ header.is_first_packet_in_frame = true;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, StapAIncorrectSegmentLength) {
+ uint8_t data[] = {0, 0, 2, 0};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264StapA;
+ header.is_first_packet_in_frame = true;
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kDrop);
+}
+
+TEST_F(TestH264SpsPpsTracker, SingleNaluInsertStartCode) {
+ uint8_t data[] = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().nalus_length = 1;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, NoStartCodeInsertedForSubsequentFuAPacket) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264FuA;
+  // Since no NALU begins in this packet, the nalus_length is zero.
+ header.h264().nalus_length = 0;
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(data));
+}
+
+TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsPpsInserted) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.is_first_packet_in_frame = true;
+ AddIdr(&header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoPpsInserted) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.is_first_packet_in_frame = true;
+ AddSps(&header, 0, &data);
+ AddIdr(&header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsInserted) {
+ std::vector<uint8_t> data = {1, 2, 3};
+ H264VideoHeader header;
+ header.is_first_packet_in_frame = true;
+ AddPps(&header, 0, 0, &data);
+ AddIdr(&header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
+ std::vector<uint8_t> data;
+ H264VideoHeader sps_pps_header;
+ // Insert SPS/PPS
+ AddSps(&sps_pps_header, 0, &data);
+ AddPps(&sps_pps_header, 0, 1, &data);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &sps_pps_header).action,
+ H264SpsPpsTracker::kInsert);
+
+ // Insert first packet of the IDR
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 1);
+ data = {1, 2, 3};
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &idr_header);
+ EXPECT_EQ(fixed.action, H264SpsPpsTracker::kInsert);
+
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsIdrInStapA) {
+ std::vector<uint8_t> data;
+ H264VideoHeader header;
+ header.h264().packetization_type = kH264StapA;
+ header.is_first_packet_in_frame = true; // Always true for StapA
+
+ data.insert(data.end(), {0}); // First byte is ignored
+ data.insert(data.end(), {0, 2}); // Length of segment
+ AddSps(&header, 13, &data);
+ data.insert(data.end(), {0, 2}); // Length of segment
+ AddPps(&header, 13, 27, &data);
+ data.insert(data.end(), {0, 5}); // Length of segment
+ AddIdr(&header, 27);
+ data.insert(data.end(), {1, 2, 3, 2, 1});
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(data, &header);
+
+ EXPECT_THAT(fixed.action, H264SpsPpsTracker::kInsert);
+
+ std::vector<uint8_t> expected;
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {H264::NaluType::kSps, 13});
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {H264::NaluType::kPps, 27});
+ expected.insert(expected.end(), start_code, start_code + sizeof(start_code));
+ expected.insert(expected.end(), {1, 2, 3, 2, 1});
+ EXPECT_THAT(Bitstream(fixed), ElementsAreArray(expected));
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBand) {
+ constexpr uint8_t kData[] = {1, 2, 3};
+
+ // Generated by "ffmpeg -r 30 -f avfoundation -i "default" out.h264" on macos.
+ // width: 320, height: 240
+ const std::vector<uint8_t> sps(
+ {0x67, 0x7a, 0x00, 0x0d, 0xbc, 0xd9, 0x41, 0x41, 0xfa, 0x10, 0x00, 0x00,
+ 0x03, 0x00, 0x10, 0x00, 0x00, 0x03, 0x03, 0xc0, 0xf1, 0x42, 0x99, 0x60});
+ const std::vector<uint8_t> pps({0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0});
+ tracker_.InsertSpsPpsNalus(sps, pps);
+
+ // Insert first packet of the IDR.
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 0);
+ EXPECT_EQ(idr_header.h264().nalus_length, 1u);
+
+ H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(kData, &idr_header);
+
+ EXPECT_EQ(idr_header.h264().nalus_length, 3u);
+ EXPECT_EQ(idr_header.width, 320u);
+ EXPECT_EQ(idr_header.height, 240u);
+ ExpectSpsPpsIdr(idr_header.h264(), 0, 0);
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBandWrongNaluHeader) {
+ constexpr uint8_t kData[] = {1, 2, 3};
+
+ // Generated by "ffmpeg -r 30 -f avfoundation -i "default" out.h264" on macos.
+ // Nalu headers manupilated afterwards.
+ const std::vector<uint8_t> sps(
+ {0xff, 0x7a, 0x00, 0x0d, 0xbc, 0xd9, 0x41, 0x41, 0xfa, 0x10, 0x00, 0x00,
+ 0x03, 0x00, 0x10, 0x00, 0x00, 0x03, 0x03, 0xc0, 0xf1, 0x42, 0x99, 0x60});
+ const std::vector<uint8_t> pps({0xff, 0xeb, 0xe3, 0xcb, 0x22, 0xc0});
+ tracker_.InsertSpsPpsNalus(sps, pps);
+
+ // Insert first packet of the IDR.
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(kData, &idr_header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBandIncompleteNalu) {
+ constexpr uint8_t kData[] = {1, 2, 3};
+
+ // Generated by "ffmpeg -r 30 -f avfoundation -i "default" out.h264" on macos.
+ // Nalus damaged afterwards.
+ const std::vector<uint8_t> sps({0x67, 0x7a, 0x00, 0x0d, 0xbc, 0xd9});
+ const std::vector<uint8_t> pps({0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0});
+ tracker_.InsertSpsPpsNalus(sps, pps);
+
+ // Insert first packet of the IDR.
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 0);
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(kData, &idr_header).action,
+ H264SpsPpsTracker::kRequestKeyframe);
+}
+
+TEST_F(TestH264SpsPpsTracker, SaveRestoreWidthHeight) {
+ std::vector<uint8_t> data;
+
+ // Insert an SPS/PPS packet with width/height and make sure
+ // that information is set on the first IDR packet.
+ H264VideoHeader sps_pps_header;
+ AddSps(&sps_pps_header, 0, &data);
+ AddPps(&sps_pps_header, 0, 1, &data);
+ sps_pps_header.width = 320;
+ sps_pps_header.height = 240;
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &sps_pps_header).action,
+ H264SpsPpsTracker::kInsert);
+
+ H264VideoHeader idr_header;
+ idr_header.is_first_packet_in_frame = true;
+ AddIdr(&idr_header, 1);
+ data.insert(data.end(), {1, 2, 3});
+
+ EXPECT_EQ(tracker_.CopyAndFixBitstream(data, &idr_header).action,
+ H264SpsPpsTracker::kInsert);
+
+ EXPECT_EQ(idr_header.width, 320);
+ EXPECT_EQ(idr_header.height, 240);
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/histogram.cc b/third_party/libwebrtc/modules/video_coding/histogram.cc
new file mode 100644
index 0000000000..4e90b19eec
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/histogram.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/histogram.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace video_coding {
+Histogram::Histogram(size_t num_buckets, size_t max_num_values) {
+ RTC_DCHECK_GT(num_buckets, 0);
+ RTC_DCHECK_GT(max_num_values, 0);
+ buckets_.resize(num_buckets);
+ values_.reserve(max_num_values);
+ index_ = 0;
+}
+
+void Histogram::Add(size_t value) {
+ value = std::min<size_t>(value, buckets_.size() - 1);
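+  // Once `values_` has reached its capacity, overwrite the oldest value and
+  // move its count from the old bucket to the new one; otherwise just record
+  // the new value.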
+ if (index_ < values_.size()) {
+ --buckets_[values_[index_]];
+ RTC_DCHECK_LT(values_[index_], buckets_.size());
+ values_[index_] = value;
+ } else {
+ values_.emplace_back(value);
+ }
+
+ ++buckets_[value];
+ index_ = (index_ + 1) % values_.capacity();
+}
+
+size_t Histogram::InverseCdf(float probability) const {
+ RTC_DCHECK_GE(probability, 0.f);
+ RTC_DCHECK_LE(probability, 1.f);
+ RTC_DCHECK_GT(values_.size(), 0ul);
+
+ size_t bucket = 0;
+ float accumulated_probability = 0;
+ while (accumulated_probability < probability && bucket < buckets_.size()) {
+ accumulated_probability +=
+ static_cast<float>(buckets_[bucket]) / values_.size();
+ ++bucket;
+ }
+ return bucket;
+}
+
+size_t Histogram::NumValues() const {
+ return values_.size();
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/histogram.h b/third_party/libwebrtc/modules/video_coding/histogram.h
new file mode 100644
index 0000000000..aa8d44d80f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/histogram.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_HISTOGRAM_H_
+#define MODULES_VIDEO_CODING_HISTOGRAM_H_
+
+#include <cstddef>
+#include <vector>
+
+namespace webrtc {
+namespace video_coding {
+class Histogram {
+ public:
+  // A discrete histogram with buckets covering the range [0, num_buckets).
+  // Values greater than or equal to num_buckets are placed in the last
+  // bucket.
+ Histogram(size_t num_buckets, size_t max_num_values);
+
+  // Add a value to the histogram. If the histogram already holds
+  // max_num_values values, the oldest value is replaced with the new one.
+ void Add(size_t value);
+
+ // Calculates how many buckets have to be summed in order to accumulate at
+ // least the given probability.
+ size_t InverseCdf(float probability) const;
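+  // For example (mirroring the unit tests): after adding {0, 1, 2, 3, 4},
+  // each bucket holds probability 0.2, so InverseCdf(0.2f) returns 1 and
+  // InverseCdf(0.8f) returns 4.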
+
+  // The number of values that make up this histogram.
+ size_t NumValues() const;
+
+ private:
+ // A circular buffer that holds the values that make up the histogram.
+ std::vector<size_t> values_;
+ std::vector<size_t> buckets_;
+ size_t index_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_HISTOGRAM_H_
diff --git a/third_party/libwebrtc/modules/video_coding/histogram_unittest.cc b/third_party/libwebrtc/modules/video_coding/histogram_unittest.cc
new file mode 100644
index 0000000000..3690a39398
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/histogram_unittest.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/histogram.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class TestHistogram : public ::testing::Test {
+ protected:
+ TestHistogram() : histogram_(5, 10) {}
+ Histogram histogram_;
+};
+
+TEST_F(TestHistogram, NumValues) {
+ EXPECT_EQ(0ul, histogram_.NumValues());
+ histogram_.Add(0);
+ EXPECT_EQ(1ul, histogram_.NumValues());
+}
+
+TEST_F(TestHistogram, InverseCdf) {
+ histogram_.Add(0);
+ histogram_.Add(1);
+ histogram_.Add(2);
+ histogram_.Add(3);
+ histogram_.Add(4);
+ EXPECT_EQ(5ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.2f));
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.2000001f));
+ EXPECT_EQ(4ul, histogram_.InverseCdf(0.8f));
+
+ histogram_.Add(0);
+ EXPECT_EQ(6ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.2f));
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.2000001f));
+}
+
+TEST_F(TestHistogram, ReplaceOldValues) {
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(0);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ histogram_.Add(1);
+ EXPECT_EQ(10ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.5f));
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.5000001f));
+
+ histogram_.Add(4);
+ histogram_.Add(4);
+ histogram_.Add(4);
+ histogram_.Add(4);
+ EXPECT_EQ(10ul, histogram_.NumValues());
+ EXPECT_EQ(1ul, histogram_.InverseCdf(0.1f));
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.5f));
+
+ histogram_.Add(20);
+ EXPECT_EQ(10ul, histogram_.NumValues());
+ EXPECT_EQ(2ul, histogram_.InverseCdf(0.5f));
+ EXPECT_EQ(5ul, histogram_.InverseCdf(0.5000001f));
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h b/third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h
new file mode 100644
index 0000000000..270c4dbcd1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_codec_initializer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INITIALIZER_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INITIALIZER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+class VideoBitrateAllocator;
+class VideoCodec;
+
+class VideoCodecInitializer {
+ public:
+ // Takes a VideoEncoderConfig and the VideoStream configuration and
+ // translates them into the old school VideoCodec type.
+ // It also creates a VideoBitrateAllocator instance, suitable for the codec
+ // type used. For instance, VP8 will create an allocator that can handle
+ // simulcast and temporal layering.
+ // GetBitrateAllocator is called implicitly from here; there is no need to
+ // call it again.
+ static bool SetupCodec(const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams,
+ VideoCodec* codec);
+
+ private:
+ static VideoCodec VideoEncoderConfigToVideoCodec(
+ const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INITIALIZER_H_
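
(A sketch of the intended call pattern for the interface above. The VideoEncoderConfig is assumed to be populated by the caller; the VideoStream values are illustrative, not recommendations.)

    #include "modules/video_coding/include/video_codec_initializer.h"

    bool SetupCodecSketch(const webrtc::VideoEncoderConfig& config,
                          webrtc::VideoCodec* codec) {
      webrtc::VideoStream stream;
      stream.width = 1280;
      stream.height = 720;
      stream.max_framerate = 30;
      stream.min_bitrate_bps = 300000;
      stream.target_bitrate_bps = 1500000;
      stream.max_bitrate_bps = 2500000;
      stream.max_qp = 56;
      // Returns false if the configuration cannot be translated.
      return webrtc::VideoCodecInitializer::SetupCodec(config, {stream}, codec);
    }
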
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc
new file mode 100644
index 0000000000..bd033b6c57
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+CodecSpecificInfo::CodecSpecificInfo() : codecType(kVideoCodecGeneric) {
+ memset(&codecSpecific, 0, sizeof(codecSpecific));
+}
+
+CodecSpecificInfo::CodecSpecificInfo(const CodecSpecificInfo&) = default;
+CodecSpecificInfo::~CodecSpecificInfo() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h
new file mode 100644
index 0000000000..46ae0d29e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Note: If any pointers are added to this struct, it must be fitted
+// with a copy-constructor. See below.
+ // Hack alert - the code assumes that this struct is memset when constructed.
+struct CodecSpecificInfoVP8 {
+ bool nonReference;
+ uint8_t temporalIdx;
+ bool layerSync;
+ int8_t keyIdx; // Negative value to skip keyIdx.
+
+ // Used to generate the list of dependency frames.
+ // `referencedBuffers` and `updatedBuffers` contain buffer IDs.
+ // Note that the buffer IDs here have a one-to-one mapping with the actual
+ // codec buffers, but the exact mapping (i.e. whether 0 refers to Last,
+ // to Golden or to Arf) is not pre-determined.
+ // More references may be specified than are strictly necessary, but not fewer.
+ // TODO(bugs.webrtc.org/10242): Remove `useExplicitDependencies` once all
+ // encoder-wrappers are updated.
+ bool useExplicitDependencies;
+ static constexpr size_t kBuffersCount = 3;
+ size_t referencedBuffers[kBuffersCount];
+ size_t referencedBuffersCount;
+ size_t updatedBuffers[kBuffersCount];
+ size_t updatedBuffersCount;
+};
+static_assert(std::is_pod<CodecSpecificInfoVP8>::value, "");
+
+ // Hack alert - the code assumes that this struct is memset when constructed.
+struct CodecSpecificInfoVP9 {
+ bool first_frame_in_picture; // First frame, increment picture_id.
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode;
+ bool ss_data_available;
+ bool non_ref_for_inter_layer_pred;
+
+ uint8_t temporal_idx;
+ bool temporal_up_switch;
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+ uint8_t gof_idx;
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ size_t first_active_layer;
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
+
+ ABSL_DEPRECATED("") bool end_of_picture;
+};
+static_assert(std::is_pod<CodecSpecificInfoVP9>::value, "");
+
+ // Hack alert - the code assumes that this struct is memset when constructed.
+struct CodecSpecificInfoH264 {
+ H264PacketizationMode packetization_mode;
+ uint8_t temporal_idx;
+ bool base_layer_sync;
+ bool idr_frame;
+};
+static_assert(std::is_pod<CodecSpecificInfoH264>::value, "");
+
+union CodecSpecificInfoUnion {
+ CodecSpecificInfoVP8 VP8;
+ CodecSpecificInfoVP9 VP9;
+ CodecSpecificInfoH264 H264;
+};
+static_assert(std::is_pod<CodecSpecificInfoUnion>::value, "");
+
+// Note: if any pointers are added to this struct or its sub-structs, it
+// must be fitted with a copy-constructor. This is because it is copied
+// in the copy-constructor of VCMEncodedFrame.
+struct RTC_EXPORT CodecSpecificInfo {
+ CodecSpecificInfo();
+ CodecSpecificInfo(const CodecSpecificInfo&);
+ ~CodecSpecificInfo();
+
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
+ bool end_of_picture = true;
+ absl::optional<GenericFrameInfo> generic_frame_info;
+ absl::optional<FrameDependencyStructure> template_structure;
+ absl::optional<ScalabilityMode> scalability_mode;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
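
(To illustrate the contract above: default construction zeroes the union, so a wrapper only sets the fields that deviate from zero. The helper below is hypothetical, not WebRTC code.)

    webrtc::CodecSpecificInfo MakeVp8Info(uint8_t temporal_idx, bool layer_sync) {
      webrtc::CodecSpecificInfo info;  // codecType starts as kVideoCodecGeneric.
      info.codecType = webrtc::kVideoCodecVP8;
      info.codecSpecific.VP8.temporalIdx = temporal_idx;
      info.codecSpecific.VP8.layerSync = layer_sync;
      info.codecSpecific.VP8.keyIdx = -1;  // Negative value skips keyIdx.
      return info;
    }
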
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_coding.h b/third_party/libwebrtc/modules/video_coding/include/video_coding.h
new file mode 100644
index 0000000000..ee9326d9fc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_coding.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+
+#include "api/field_trials_view.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+
+namespace webrtc {
+
+class Clock;
+class EncodedImageCallback;
+class VideoDecoder;
+class VideoEncoder;
+struct CodecSpecificInfo;
+
+class VideoCodingModule {
+ public:
+ // DEPRECATED.
+ static VideoCodingModule* Create(
+ Clock* clock,
+ const FieldTrialsView* field_trials = nullptr);
+
+ virtual ~VideoCodingModule() = default;
+
+ /*
+ * Receiver
+ */
+
+ // Registers possible receive codecs; can be called multiple times for
+ // different codecs. The module will automatically switch between registered
+ // codecs depending on the payload type of incoming frames. The actual
+ // decoder will be created when needed.
+ //
+ // Input:
+ // - payload_type : RTP payload type
+ // - settings : Settings for the decoder to be registered.
+ //
+ virtual void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings) = 0;
+
+ // Register an external decoder object.
+ //
+ // Input:
+ // - externalDecoder : Decoder object to be used for decoding frames.
+ // - payloadType : The payload type which this decoder is bound to.
+ virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) = 0;
+
+ // Registers a receive callback that will be called whenever there is a new
+ // frame ready for rendering.
+ //
+ // Input:
+ // - receiveCallback : The callback object to be used by the module
+ // when a frame is ready for rendering.
+ // De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) = 0;
+
+ // Register a frame type request callback. This callback will be called
+ // when the module needs to request specific frame types from the send side.
+ //
+ // Input:
+ // - frameTypeCallback : The callback object to be used by the module
+ // when requesting a specific type of frame from the send side.
+ // De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) = 0;
+
+ // Registers a callback which is called whenever the receive side of the VCM
+ // encounters holes in the packet sequence and needs packets to be
+ // retransmitted.
+ //
+ // Input:
+ // - callback : The callback to be registered in the VCM.
+ //
+ // Return value : VCM_OK, on success.
+ // <0, on error.
+ virtual int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) = 0;
+
+ // Waits for the next frame in the jitter buffer to become complete
+ // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
+ // decoding.
+ // Should be called as often as possible to get the most out of the decoder.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
+
+ // Insert a parsed packet into the receiver side of the module. The packet
+ // is placed in the jitter buffer to wait for its frame to become complete.
+ // Returns as soon as the packet has been placed in the jitter buffer.
+ //
+ // Input:
+ // - incomingPayload : Payload of the packet.
+ // - payloadLength : Length of the payload.
+ // - rtp_header : The parsed RTP header.
+ // - video_header : The relevant extensions and payload header.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header) = 0;
+
+ // Sets the maximum number of sequence numbers that we are allowed to NACK
+ // and the oldest sequence number that we will consider to NACK. If a
+ // sequence number older than `max_packet_age_to_nack` is missing
+ // a key frame will be requested. A key frame will also be requested if the
+ // time of incomplete or non-continuous frames in the jitter buffer is above
+ // `max_incomplete_time_ms`.
+ virtual void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) = 0;
+
+ // Runs delayed tasks. Expected to be called periodically.
+ virtual void Process() = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
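
(A sketch of the receive-side call order implied by the interface above; the decoder and callback objects are assumed to be supplied elsewhere, and loop termination is elided.)

    void ReceiveSketch(webrtc::VideoCodingModule* vcm,
                       webrtc::VideoDecoder* decoder,
                       webrtc::VCMReceiveCallback* render_callback,
                       uint8_t payload_type,
                       const webrtc::VideoDecoder::Settings& settings) {
      vcm->RegisterReceiveCodec(payload_type, settings);
      vcm->RegisterExternalDecoder(decoder, payload_type);
      vcm->RegisterReceiveCallback(render_callback);
      // Packets are fed in through IncomingPacket() as they arrive; the
      // decode loop polls for complete frames and runs periodic housekeeping.
      for (;;) {
        vcm->Decode(/*maxWaitTimeMs=*/200);
        vcm->Process();
      }
    }
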
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h b/third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h
new file mode 100644
index 0000000000..bf98d5e668
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_coding_defines.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/video_decoder.h"
+
+namespace webrtc {
+
+// Error codes
+#define VCM_FRAME_NOT_READY 3
+#define VCM_MISSING_CALLBACK 1
+#define VCM_OK 0
+#define VCM_GENERAL_ERROR -1
+#define VCM_PARAMETER_ERROR -4
+#define VCM_NO_CODEC_REGISTERED -8
+#define VCM_JITTER_BUFFER_ERROR -9
+
+enum {
+ // Timing frames settings. Timing frames are sent every
+ // `kDefaultTimingFramesDelayMs`, or if the frame size is at least
+ // `kDefaultOutlierFrameSizePercent` percent of the average frame size.
+ kDefaultTimingFramesDelayMs = 200,
+ kDefaultOutlierFrameSizePercent = 500,
+ // Maximum number of frames for which we store encode start timing
+ // information.
+ kMaxEncodeStartTimeListSize = 150,
+};
+
+enum VCMVideoProtection {
+ kProtectionNack,
+ kProtectionNackFEC,
+};
+
+// Callback class used for passing decoded frames which are ready to be
+// rendered.
+class VCMReceiveCallback {
+ public:
+ virtual int32_t FrameToRender(VideoFrame& videoFrame, // NOLINT
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) = 0;
+
+ virtual void OnDroppedFrames(uint32_t frames_dropped);
+
+ // Called when the current receive codec changes.
+ virtual void OnIncomingPayloadType(int payload_type);
+ virtual void OnDecoderInfoChanged(
+ const VideoDecoder::DecoderInfo& decoder_info);
+
+ protected:
+ virtual ~VCMReceiveCallback() {}
+};
+
+ // Callback class used for informing the user of incoming frame, packet and
+ // timing statistics on the receive side.
+class VCMReceiveStatisticsCallback {
+ public:
+ virtual void OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) = 0;
+
+ virtual void OnDroppedFrames(uint32_t frames_dropped) = 0;
+
+ virtual void OnDiscardedPackets(uint32_t packets_discarded) = 0;
+
+ virtual void OnFrameBufferTimingsUpdated(int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) = 0;
+
+ virtual void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) = 0;
+
+ protected:
+ virtual ~VCMReceiveStatisticsCallback() {}
+};
+
+ // Callback class used for telling the user which frame type is needed to
+ // continue decoding, typically a key frame when the stream has been
+ // corrupted in some way.
+class VCMFrameTypeCallback {
+ public:
+ virtual int32_t RequestKeyFrame() = 0;
+
+ protected:
+ virtual ~VCMFrameTypeCallback() {}
+};
+
+ // Callback class used for telling the user which packet sequence numbers are
+ // currently missing and need to be resent.
+// TODO(philipel): Deprecate VCMPacketRequestCallback
+// and use NackSender instead.
+class VCMPacketRequestCallback {
+ public:
+ virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
+ uint16_t length) = 0;
+
+ protected:
+ virtual ~VCMPacketRequestCallback() {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
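
(Only FrameToRender is pure virtual above, so a minimal receiver callback can look like the hypothetical sketch below; the actual frame sink is not shown.)

    class RenderForwarder : public webrtc::VCMReceiveCallback {
     public:
      int32_t FrameToRender(webrtc::VideoFrame& video_frame,
                            absl::optional<uint8_t> qp,
                            webrtc::TimeDelta decode_time,
                            webrtc::VideoContentType content_type) override {
        // Hand `video_frame` to a renderer or sink here (not shown).
        return VCM_OK;
      }
    };
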
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
new file mode 100644
index 0000000000..17146ce205
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+
+// NOTE: in sync with video_coding_module_defines.h
+
+// Define return values
+
+#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT 5
+#define WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME 4
+#define WEBRTC_VIDEO_CODEC_NO_OUTPUT 1
+#define WEBRTC_VIDEO_CODEC_OK 0
+#define WEBRTC_VIDEO_CODEC_ERROR -1
+#define WEBRTC_VIDEO_CODEC_MEMORY -3
+#define WEBRTC_VIDEO_CODEC_ERR_PARAMETER -4
+#define WEBRTC_VIDEO_CODEC_TIMEOUT -6
+#define WEBRTC_VIDEO_CODEC_UNINITIALIZED -7
+#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
+#define WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED -15
+#define WEBRTC_VIDEO_CODEC_ENCODER_FAILURE -16
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
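
(These codes are plain macros, so call sites map them to strings by hand when logging; a hypothetical helper, not part of WebRTC, could look like this.)

    const char* VideoCodecErrorToString(int32_t error) {
      switch (error) {
        case WEBRTC_VIDEO_CODEC_OK:
          return "OK";
        case WEBRTC_VIDEO_CODEC_NO_OUTPUT:
          return "NO_OUTPUT";
        case WEBRTC_VIDEO_CODEC_ERROR:
          return "ERROR";
        case WEBRTC_VIDEO_CODEC_ERR_PARAMETER:
          return "ERR_PARAMETER";
        case WEBRTC_VIDEO_CODEC_UNINITIALIZED:
          return "UNINITIALIZED";
        case WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE:
          return "FALLBACK_SOFTWARE";
        default:
          return "UNKNOWN";
      }
    }
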
diff --git a/third_party/libwebrtc/modules/video_coding/internal_defines.h b/third_party/libwebrtc/modules/video_coding/internal_defines.h
new file mode 100644
index 0000000000..f753f200e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/internal_defines.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+#define MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+
+namespace webrtc {
+
+#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define VCM_FLUSH_INDICATOR 4
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer.cc b/third_party/libwebrtc/modules/video_coding/jitter_buffer.cc
new file mode 100644
index 0000000000..39553c9f3f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer.cc
@@ -0,0 +1,892 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/jitter_buffer.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "api/units/timestamp.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+// Use this rtt if no value has been reported.
+static const int64_t kDefaultRtt = 200;
+
+typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;
+
+bool IsKeyFrame(FrameListPair pair) {
+ return pair.second->FrameType() == VideoFrameType::kVideoFrameKey;
+}
+
+bool HasNonEmptyState(FrameListPair pair) {
+ return pair.second->GetState() != kStateEmpty;
+}
+
+void FrameList::InsertFrame(VCMFrameBuffer* frame) {
+ insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
+}
+
+VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
+ FrameList::iterator it = find(timestamp);
+ if (it == end())
+ return NULL;
+ VCMFrameBuffer* frame = it->second;
+ erase(it);
+ return frame;
+}
+
+VCMFrameBuffer* FrameList::Front() const {
+ return begin()->second;
+}
+
+VCMFrameBuffer* FrameList::Back() const {
+ return rbegin()->second;
+}
+
+int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
+ UnorderedFrameList* free_frames) {
+ int drop_count = 0;
+ FrameList::iterator it = begin();
+ while (!empty()) {
+ // Throw at least one frame.
+ it->second->Reset();
+ free_frames->push_back(it->second);
+ erase(it++);
+ ++drop_count;
+ if (it != end() &&
+ it->second->FrameType() == VideoFrameType::kVideoFrameKey) {
+ *key_frame_it = it;
+ return drop_count;
+ }
+ }
+ *key_frame_it = end();
+ return drop_count;
+}
+
+void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
+ UnorderedFrameList* free_frames) {
+ while (!empty()) {
+ VCMFrameBuffer* oldest_frame = Front();
+ bool remove_frame = false;
+ if (oldest_frame->GetState() == kStateEmpty && size() > 1) {
+ // This frame is empty, try to update the last decoded state and drop it
+ // if successful.
+ remove_frame = decoding_state->UpdateEmptyFrame(oldest_frame);
+ } else {
+ remove_frame = decoding_state->IsOldFrame(oldest_frame);
+ }
+ if (!remove_frame) {
+ break;
+ }
+ free_frames->push_back(oldest_frame);
+ erase(begin());
+ }
+}
+
+void FrameList::Reset(UnorderedFrameList* free_frames) {
+ while (!empty()) {
+ begin()->second->Reset();
+ free_frames->push_back(begin()->second);
+ erase(begin());
+ }
+}
+
+VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
+ std::unique_ptr<EventWrapper> event,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ running_(false),
+ frame_event_(std::move(event)),
+ max_number_of_frames_(kStartNumberOfFrames),
+ free_frames_(),
+ decodable_frames_(),
+ incomplete_frames_(),
+ last_decoded_state_(),
+ first_packet_since_reset_(true),
+ num_consecutive_old_packets_(0),
+ num_packets_(0),
+ num_duplicated_packets_(0),
+ jitter_estimate_(clock, field_trials),
+ missing_sequence_numbers_(SequenceNumberLessThan()),
+ latest_received_sequence_number_(0),
+ max_nack_list_size_(0),
+ max_packet_age_to_nack_(0),
+ max_incomplete_time_ms_(0),
+ average_packets_per_frame_(0.0f),
+ frame_counter_(0) {
+ for (int i = 0; i < kStartNumberOfFrames; i++)
+ free_frames_.push_back(new VCMFrameBuffer());
+}
+
+VCMJitterBuffer::~VCMJitterBuffer() {
+ Stop();
+ for (UnorderedFrameList::iterator it = free_frames_.begin();
+ it != free_frames_.end(); ++it) {
+ delete *it;
+ }
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end(); ++it) {
+ delete it->second;
+ }
+ for (FrameList::iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ delete it->second;
+ }
+}
+
+void VCMJitterBuffer::Start() {
+ MutexLock lock(&mutex_);
+ running_ = true;
+
+ num_consecutive_old_packets_ = 0;
+ num_packets_ = 0;
+ num_duplicated_packets_ = 0;
+
+ // Start in a non-signaled state.
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ waiting_for_completion_.latest_packet_time = -1;
+ first_packet_since_reset_ = true;
+ last_decoded_state_.Reset();
+
+ decodable_frames_.Reset(&free_frames_);
+ incomplete_frames_.Reset(&free_frames_);
+}
+
+void VCMJitterBuffer::Stop() {
+ MutexLock lock(&mutex_);
+ running_ = false;
+ last_decoded_state_.Reset();
+
+ // Make sure we wake up any threads waiting on these events.
+ frame_event_->Set();
+}
+
+bool VCMJitterBuffer::Running() const {
+ MutexLock lock(&mutex_);
+ return running_;
+}
+
+void VCMJitterBuffer::Flush() {
+ MutexLock lock(&mutex_);
+ decodable_frames_.Reset(&free_frames_);
+ incomplete_frames_.Reset(&free_frames_);
+ last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
+ num_consecutive_old_packets_ = 0;
+ // Also reset the jitter and delay estimates
+ jitter_estimate_.Reset();
+ inter_frame_delay_.Reset();
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ waiting_for_completion_.latest_packet_time = -1;
+ first_packet_since_reset_ = true;
+ missing_sequence_numbers_.clear();
+}
+
+int VCMJitterBuffer::num_packets() const {
+ MutexLock lock(&mutex_);
+ return num_packets_;
+}
+
+int VCMJitterBuffer::num_duplicated_packets() const {
+ MutexLock lock(&mutex_);
+ return num_duplicated_packets_;
+}
+
+ // Returns the oldest complete frame immediately if one is available;
+ // otherwise waits up to `max_wait_time_ms` ms for one to become complete,
+ // with `max_wait_time_ms` decided by the caller.
+VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) {
+ MutexLock lock(&mutex_);
+ if (!running_) {
+ return nullptr;
+ }
+ CleanUpOldOrEmptyFrames();
+
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ const int64_t end_wait_time_ms =
+ clock_->TimeInMilliseconds() + max_wait_time_ms;
+ int64_t wait_time_ms = max_wait_time_ms;
+ while (wait_time_ms > 0) {
+ mutex_.Unlock();
+ const EventTypeWrapper ret =
+ frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
+ mutex_.Lock();
+ if (ret == kEventSignaled) {
+ // Are we shutting down the jitter buffer?
+ if (!running_) {
+ return nullptr;
+ }
+ // Find the oldest frame ready for the decoder.
+ CleanUpOldOrEmptyFrames();
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ wait_time_ms = end_wait_time_ms - clock_->TimeInMilliseconds();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ return nullptr;
+ }
+ return decodable_frames_.Front();
+}
+
+VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
+ MutexLock lock(&mutex_);
+ if (!running_) {
+ return NULL;
+ }
+ // Extract the frame with the desired timestamp.
+ VCMFrameBuffer* frame = decodable_frames_.PopFrame(timestamp);
+ bool continuous = true;
+ if (!frame) {
+ frame = incomplete_frames_.PopFrame(timestamp);
+ if (frame)
+ continuous = last_decoded_state_.ContinuousFrame(frame);
+ else
+ return NULL;
+ }
+ // Frame pulled out from jitter buffer, update the jitter estimate.
+ const bool retransmitted = (frame->GetNackCount() > 0);
+ if (retransmitted) {
+ jitter_estimate_.FrameNacked();
+ } else if (frame->size() > 0) {
+ // Ignore retransmitted and empty frames.
+ if (waiting_for_completion_.latest_packet_time >= 0) {
+ UpdateJitterEstimate(waiting_for_completion_, true);
+ }
+ if (frame->GetState() == kStateComplete) {
+ UpdateJitterEstimate(*frame, false);
+ } else {
+ // Wait for this one to get complete.
+ waiting_for_completion_.frame_size = frame->size();
+ waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
+ waiting_for_completion_.timestamp = frame->Timestamp();
+ }
+ }
+
+ // The state must be changed to decoding before cleaning up zero sized
+ // frames to avoid empty frames being cleaned up and then given to the
+ // decoder. Propagates the missing_frame bit.
+ frame->PrepareForDecode(continuous);
+
+ // We have a frame - update the last decoded state and nack list.
+ last_decoded_state_.SetState(frame);
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+
+ UpdateAveragePacketsPerFrame(frame->NumPackets());
+
+ return frame;
+}
+
+// Release frame when done with decoding. Should never be used to release
+// frames from within the jitter buffer.
+void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) {
+ RTC_CHECK(frame != nullptr);
+ MutexLock lock(&mutex_);
+ VCMFrameBuffer* frame_buffer = static_cast<VCMFrameBuffer*>(frame);
+ RecycleFrameBuffer(frame_buffer);
+}
+
+// Gets frame to use for this timestamp. If no match, get empty frame.
+VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
+ VCMFrameBuffer** frame,
+ FrameList** frame_list) {
+ *frame = incomplete_frames_.PopFrame(packet.timestamp);
+ if (*frame != NULL) {
+ *frame_list = &incomplete_frames_;
+ return kNoError;
+ }
+ *frame = decodable_frames_.PopFrame(packet.timestamp);
+ if (*frame != NULL) {
+ *frame_list = &decodable_frames_;
+ return kNoError;
+ }
+
+ *frame_list = NULL;
+ // No match, return empty frame.
+ *frame = GetEmptyFrame();
+ if (*frame == NULL) {
+ // No free frame! Try to reclaim some...
+ RTC_LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
+ bool found_key_frame = RecycleFramesUntilKeyFrame();
+ *frame = GetEmptyFrame();
+ RTC_CHECK(*frame);
+ if (!found_key_frame) {
+ RecycleFrameBuffer(*frame);
+ return kFlushIndicator;
+ }
+ }
+ (*frame)->Reset();
+ return kNoError;
+}
+
+int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame,
+ bool* retransmitted) const {
+ RTC_DCHECK(retransmitted);
+ MutexLock lock(&mutex_);
+ const VCMFrameBuffer* frame_buffer =
+ static_cast<const VCMFrameBuffer*>(frame);
+ *retransmitted = (frame_buffer->GetNackCount() > 0);
+ return frame_buffer->LatestPacketTimeMs();
+}
+
+VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
+ bool* retransmitted) {
+ MutexLock lock(&mutex_);
+
+ ++num_packets_;
+ // Does this packet belong to an old frame?
+ if (last_decoded_state_.IsOldPacket(&packet)) {
+ // Account only for media packets.
+ if (packet.sizeBytes > 0) {
+ num_consecutive_old_packets_++;
+ }
+ // Update last decoded sequence number if the packet arrived late and
+ // belongs to a frame with a timestamp equal to the last decoded
+ // timestamp.
+ last_decoded_state_.UpdateOldPacket(&packet);
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+
+ // Also see if this old packet made more incomplete frames continuous.
+ FindAndInsertContinuousFramesWithState(last_decoded_state_);
+
+ if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
+ RTC_LOG(LS_WARNING)
+ << num_consecutive_old_packets_
+ << " consecutive old packets received. Flushing the jitter buffer.";
+ Flush();
+ return kFlushIndicator;
+ }
+ return kOldPacket;
+ }
+
+ num_consecutive_old_packets_ = 0;
+
+ VCMFrameBuffer* frame;
+ FrameList* frame_list;
+ const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
+ if (error != kNoError)
+ return error;
+
+ Timestamp now = clock_->CurrentTime();
+ // We are keeping track of the first and latest seq numbers, and
+ // the number of wraps to be able to calculate how many packets we expect.
+ if (first_packet_since_reset_) {
+ // Now it's time to start estimating jitter;
+ // reset the delay estimate.
+ inter_frame_delay_.Reset();
+ }
+
+ // Empty packets may bias the jitter estimate (lacking size component),
+ // therefore don't let empty packets trigger the following updates:
+ if (packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
+ if (waiting_for_completion_.timestamp == packet.timestamp) {
+ // This can get bad if we have a lot of duplicate packets,
+ // we will then count some packet multiple times.
+ waiting_for_completion_.frame_size += packet.sizeBytes;
+ waiting_for_completion_.latest_packet_time = now.ms();
+ } else if (waiting_for_completion_.latest_packet_time >= 0 &&
+ waiting_for_completion_.latest_packet_time + 2000 <= now.ms()) {
+ // A packet should never be more than two seconds late
+ UpdateJitterEstimate(waiting_for_completion_, true);
+ waiting_for_completion_.latest_packet_time = -1;
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ }
+ }
+
+ VCMFrameBufferStateEnum previous_state = frame->GetState();
+ // Insert packet.
+ FrameData frame_data;
+ frame_data.rtt_ms = kDefaultRtt;
+ frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
+ VCMFrameBufferEnum buffer_state =
+ frame->InsertPacket(packet, now.ms(), frame_data);
+
+ if (buffer_state > 0) {
+ if (first_packet_since_reset_) {
+ latest_received_sequence_number_ = packet.seqNum;
+ first_packet_since_reset_ = false;
+ } else {
+ if (IsPacketRetransmitted(packet)) {
+ frame->IncrementNackCount();
+ }
+ if (!UpdateNackList(packet.seqNum) &&
+ packet.video_header.frame_type != VideoFrameType::kVideoFrameKey) {
+ buffer_state = kFlushIndicator;
+ }
+
+ latest_received_sequence_number_ =
+ LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
+ }
+ }
+
+ // Is the frame already in the decodable list?
+ bool continuous = IsContinuous(*frame);
+ switch (buffer_state) {
+ case kGeneralError:
+ case kTimeStampError:
+ case kSizeError: {
+ RecycleFrameBuffer(frame);
+ break;
+ }
+ case kCompleteSession: {
+ if (previous_state != kStateComplete) {
+ if (continuous) {
+ // Signal that we have a complete session.
+ frame_event_->Set();
+ }
+ }
+
+ *retransmitted = (frame->GetNackCount() > 0);
+ if (continuous) {
+ decodable_frames_.InsertFrame(frame);
+ FindAndInsertContinuousFrames(*frame);
+ } else {
+ incomplete_frames_.InsertFrame(frame);
+ }
+ break;
+ }
+ case kIncomplete: {
+ if (frame->GetState() == kStateEmpty &&
+ last_decoded_state_.UpdateEmptyFrame(frame)) {
+ RecycleFrameBuffer(frame);
+ return kNoError;
+ } else {
+ incomplete_frames_.InsertFrame(frame);
+ }
+ break;
+ }
+ case kNoError:
+ case kOutOfBoundsPacket:
+ case kDuplicatePacket: {
+ // Put back the frame where it came from.
+ if (frame_list != NULL) {
+ frame_list->InsertFrame(frame);
+ } else {
+ RecycleFrameBuffer(frame);
+ }
+ ++num_duplicated_packets_;
+ break;
+ }
+ case kFlushIndicator:
+ RecycleFrameBuffer(frame);
+ return kFlushIndicator;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ return buffer_state;
+}
+
+bool VCMJitterBuffer::IsContinuousInState(
+ const VCMFrameBuffer& frame,
+ const VCMDecodingState& decoding_state) const {
+ // Is this frame complete and continuous?
+ return (frame.GetState() == kStateComplete) &&
+ decoding_state.ContinuousFrame(&frame);
+}
+
+bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
+ if (IsContinuousInState(frame, last_decoded_state_)) {
+ return true;
+ }
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(last_decoded_state_);
+ for (FrameList::const_iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ VCMFrameBuffer* decodable_frame = it->second;
+ if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
+ break;
+ }
+ decoding_state.SetState(decodable_frame);
+ if (IsContinuousInState(frame, decoding_state)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFrames(
+ const VCMFrameBuffer& new_frame) {
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(last_decoded_state_);
+ decoding_state.SetState(&new_frame);
+ FindAndInsertContinuousFramesWithState(decoding_state);
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& original_decoded_state) {
+ // Copy original_decoded_state so we can move the state forward with each
+ // decodable frame we find.
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(original_decoded_state);
+
+ // When temporal layers are available, we search for a complete or decodable
+ // frame until we hit one of the following:
+ // 1. Continuous base or sync layer.
+ // 2. The end of the list was reached.
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end();) {
+ VCMFrameBuffer* frame = it->second;
+ if (IsNewerTimestamp(original_decoded_state.time_stamp(),
+ frame->Timestamp())) {
+ ++it;
+ continue;
+ }
+ if (IsContinuousInState(*frame, decoding_state)) {
+ decodable_frames_.InsertFrame(frame);
+ incomplete_frames_.erase(it++);
+ decoding_state.SetState(frame);
+ } else if (frame->TemporalId() <= 0) {
+ break;
+ } else {
+ ++it;
+ }
+ }
+}
+
+uint32_t VCMJitterBuffer::EstimatedJitterMs() {
+ MutexLock lock(&mutex_);
+ const double rtt_mult = 1.0;
+ return jitter_estimate_.GetJitterEstimate(rtt_mult, absl::nullopt).ms();
+}
+
+void VCMJitterBuffer::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ MutexLock lock(&mutex_);
+ RTC_DCHECK_GE(max_packet_age_to_nack, 0);
+ RTC_DCHECK_GE(max_incomplete_time_ms, 0);
+ max_nack_list_size_ = max_nack_list_size;
+ max_packet_age_to_nack_ = max_packet_age_to_nack;
+ max_incomplete_time_ms_ = max_incomplete_time_ms;
+}
+
+int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
+ if (incomplete_frames_.empty()) {
+ return 0;
+ }
+ uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
+ if (!decodable_frames_.empty()) {
+ start_timestamp = decodable_frames_.Back()->Timestamp();
+ }
+ return incomplete_frames_.Back()->Timestamp() - start_timestamp;
+}
+
+uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
+ const VCMFrameBuffer& frame) const {
+ RTC_DCHECK_GE(frame.GetLowSeqNum(), 0);
+ if (frame.HaveFirstPacket())
+ return frame.GetLowSeqNum();
+
+ // This estimate is not accurate if more than one packet with lower sequence
+ // number is lost.
+ return frame.GetLowSeqNum() - 1;
+}
+
+std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
+ MutexLock lock(&mutex_);
+ *request_key_frame = false;
+ if (last_decoded_state_.in_initial_state()) {
+ VCMFrameBuffer* next_frame = NextFrame();
+ const bool first_frame_is_key =
+ next_frame &&
+ next_frame->FrameType() == VideoFrameType::kVideoFrameKey &&
+ next_frame->HaveFirstPacket();
+ if (!first_frame_is_key) {
+ bool have_non_empty_frame =
+ decodable_frames_.end() != find_if(decodable_frames_.begin(),
+ decodable_frames_.end(),
+ HasNonEmptyState);
+ if (!have_non_empty_frame) {
+ have_non_empty_frame =
+ incomplete_frames_.end() != find_if(incomplete_frames_.begin(),
+ incomplete_frames_.end(),
+ HasNonEmptyState);
+ }
+ bool found_key_frame = RecycleFramesUntilKeyFrame();
+ if (!found_key_frame) {
+ *request_key_frame = have_non_empty_frame;
+ return std::vector<uint16_t>();
+ }
+ }
+ }
+ if (TooLargeNackList()) {
+ *request_key_frame = !HandleTooLargeNackList();
+ }
+ if (max_incomplete_time_ms_ > 0) {
+ int non_continuous_incomplete_duration =
+ NonContinuousOrIncompleteDuration();
+ if (non_continuous_incomplete_duration > 90 * max_incomplete_time_ms_) {
+ RTC_LOG_F(LS_WARNING) << "Too long non-decodable duration: "
+ << non_continuous_incomplete_duration << " > "
+ << 90 * max_incomplete_time_ms_;
+ FrameList::reverse_iterator rit = find_if(
+ incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
+ if (rit == incomplete_frames_.rend()) {
+ // Request a key frame if we don't have one already.
+ *request_key_frame = true;
+ return std::vector<uint16_t>();
+ } else {
+ // Skip to the last key frame. If it's incomplete we will start
+ // NACKing it.
+ // Note that the estimated low sequence number is correct for VP8
+ // streams because only the first packet of a key frame is marked.
+ last_decoded_state_.Reset();
+ DropPacketsFromNackList(EstimatedLowSequenceNumber(*rit->second));
+ }
+ }
+ }
+ std::vector<uint16_t> nack_list(missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.end());
+ return nack_list;
+}
+
+VCMFrameBuffer* VCMJitterBuffer::NextFrame() const {
+ if (!decodable_frames_.empty())
+ return decodable_frames_.Front();
+ if (!incomplete_frames_.empty())
+ return incomplete_frames_.Front();
+ return NULL;
+}
+
+bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
+ // Make sure we don't add packets which are already too old to be decoded.
+ if (!last_decoded_state_.in_initial_state()) {
+ latest_received_sequence_number_ = LatestSequenceNumber(
+ latest_received_sequence_number_, last_decoded_state_.sequence_num());
+ }
+ if (IsNewerSequenceNumber(sequence_number,
+ latest_received_sequence_number_)) {
+ // Push any missing sequence numbers to the NACK list.
+ for (uint16_t i = latest_received_sequence_number_ + 1;
+ IsNewerSequenceNumber(sequence_number, i); ++i) {
+ missing_sequence_numbers_.insert(missing_sequence_numbers_.end(), i);
+ }
+ if (TooLargeNackList() && !HandleTooLargeNackList()) {
+ RTC_LOG(LS_WARNING) << "Requesting key frame due to too large NACK list.";
+ return false;
+ }
+ if (MissingTooOldPacket(sequence_number) &&
+ !HandleTooOldPackets(sequence_number)) {
+ RTC_LOG(LS_WARNING)
+ << "Requesting key frame due to missing too old packets";
+ return false;
+ }
+ } else {
+ missing_sequence_numbers_.erase(sequence_number);
+ }
+ return true;
+}
+
+bool VCMJitterBuffer::TooLargeNackList() const {
+ return missing_sequence_numbers_.size() > max_nack_list_size_;
+}
+
+bool VCMJitterBuffer::HandleTooLargeNackList() {
+ // Recycle frames until the NACK list is small enough. It is likely cheaper to
+ // request a key frame than to retransmit this many missing packets.
+ RTC_LOG_F(LS_WARNING) << "NACK list has grown too large: "
+ << missing_sequence_numbers_.size() << " > "
+ << max_nack_list_size_;
+ bool key_frame_found = false;
+ while (TooLargeNackList()) {
+ key_frame_found = RecycleFramesUntilKeyFrame();
+ }
+ return key_frame_found;
+}
+
+bool VCMJitterBuffer::MissingTooOldPacket(
+ uint16_t latest_sequence_number) const {
+ if (missing_sequence_numbers_.empty()) {
+ return false;
+ }
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
+ // Recycle frames if the NACK list contains too old sequence numbers as
+ // the packets may have already been dropped by the sender.
+ return age_of_oldest_missing_packet > max_packet_age_to_nack_;
+}
+
+bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
+ bool key_frame_found = false;
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
+ RTC_LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
+ << age_of_oldest_missing_packet << " > "
+ << max_packet_age_to_nack_;
+ while (MissingTooOldPacket(latest_sequence_number)) {
+ key_frame_found = RecycleFramesUntilKeyFrame();
+ }
+ return key_frame_found;
+}
+
+void VCMJitterBuffer::DropPacketsFromNackList(
+ uint16_t last_decoded_sequence_number) {
+ // Erase all sequence numbers from the NACK list which we won't need any
+ // longer.
+ missing_sequence_numbers_.erase(
+ missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.upper_bound(last_decoded_sequence_number));
+}
+
+VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() {
+ if (free_frames_.empty()) {
+ if (!TryToIncreaseJitterBufferSize()) {
+ return NULL;
+ }
+ }
+ VCMFrameBuffer* frame = free_frames_.front();
+ free_frames_.pop_front();
+ return frame;
+}
+
+bool VCMJitterBuffer::TryToIncreaseJitterBufferSize() {
+ if (max_number_of_frames_ >= kMaxNumberOfFrames)
+ return false;
+ free_frames_.push_back(new VCMFrameBuffer());
+ ++max_number_of_frames_;
+ return true;
+}
+
+// Recycle oldest frames up to a key frame, used if jitter buffer is completely
+// full.
+bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
+ // First release incomplete frames, and only release decodable frames if there
+ // are no incomplete ones.
+ FrameList::iterator key_frame_it;
+ bool key_frame_found = false;
+ int dropped_frames = 0;
+ dropped_frames += incomplete_frames_.RecycleFramesUntilKeyFrame(
+ &key_frame_it, &free_frames_);
+ key_frame_found = key_frame_it != incomplete_frames_.end();
+ if (dropped_frames == 0) {
+ dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame(
+ &key_frame_it, &free_frames_);
+ key_frame_found = key_frame_it != decodable_frames_.end();
+ }
+ if (key_frame_found) {
+ RTC_LOG(LS_INFO) << "Found key frame while dropping frames.";
+ // Reset last decoded state to make sure the next frame decoded is a key
+ // frame, and start NACKing from here.
+ last_decoded_state_.Reset();
+ DropPacketsFromNackList(EstimatedLowSequenceNumber(*key_frame_it->second));
+ } else if (decodable_frames_.empty()) {
+ // All frames dropped. Reset the decoding state and clear missing sequence
+ // numbers as we're starting fresh.
+ last_decoded_state_.Reset();
+ missing_sequence_numbers_.clear();
+ }
+ return key_frame_found;
+}
+
+void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
+ if (frame_counter_ > kFastConvergeThreshold) {
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kNormalConvergeMultiplier) +
+ current_number_packets * kNormalConvergeMultiplier;
+ } else if (frame_counter_ > 0) {
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kFastConvergeMultiplier) +
+ current_number_packets * kFastConvergeMultiplier;
+ frame_counter_++;
+ } else {
+ average_packets_per_frame_ = current_number_packets;
+ frame_counter_++;
+ }
+}
+
+// Must be called under the critical section `mutex_`.
+void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
+ decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
+ &free_frames_);
+ incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
+ &free_frames_);
+ if (!last_decoded_state_.in_initial_state()) {
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+ }
+}
+
+// Must be called from within `mutex_`.
+bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
+ return missing_sequence_numbers_.find(packet.seqNum) !=
+ missing_sequence_numbers_.end();
+}
+
+// Must be called under the critical section `mutex_`. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample,
+ bool incomplete_frame) {
+ if (sample.latest_packet_time == -1) {
+ return;
+ }
+ UpdateJitterEstimate(sample.latest_packet_time, sample.timestamp,
+ sample.frame_size, incomplete_frame);
+}
+
+ // Must be called under the critical section `mutex_`. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
+ bool incomplete_frame) {
+ if (frame.LatestPacketTimeMs() == -1) {
+ return;
+ }
+ // No retransmitted frames should be a part of the jitter
+ // estimate.
+ UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
+ frame.size(), incomplete_frame);
+}
+
+// Must be called under the critical section `mutex_`. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool /*incomplete_frame*/) {
+ if (latest_packet_time_ms == -1) {
+ return;
+ }
+ auto frame_delay = inter_frame_delay_.CalculateDelay(
+ timestamp, Timestamp::Millis(latest_packet_time_ms));
+
+ bool not_reordered = frame_delay.has_value();
+ // Filter out frames which have been reordered in time by the network
+ if (not_reordered) {
+ // Update the jitter estimate with the new samples
+ jitter_estimate_.UpdateEstimate(*frame_delay, DataSize::Bytes(frame_size));
+ }
+}
+
+void VCMJitterBuffer::RecycleFrameBuffer(VCMFrameBuffer* frame) {
+ frame->Reset();
+ free_frames_.push_back(frame);
+}
+
+} // namespace webrtc
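
(Pulling the pieces above together, the intended decode-side flow is roughly the sketch below; the packet path runs through InsertPacket() on the network side, and the actual decoder call is elided.)

    void DecodeSketch(webrtc::VCMJitterBuffer* jitter_buffer) {
      webrtc::VCMEncodedFrame* frame =
          jitter_buffer->NextCompleteFrame(/*max_wait_time_ms=*/200);
      if (!frame)
        return;  // Timed out waiting for a complete frame.
      // Pull the frame out by timestamp and mark it as being decoded.
      frame = jitter_buffer->ExtractAndSetDecode(frame->Timestamp());
      if (!frame)
        return;
      // ... decode `frame` ...
      jitter_buffer->ReleaseFrame(frame);  // Recycle into free_frames_.
    }
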
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer.h b/third_party/libwebrtc/modules/video_coding/jitter_buffer.h
new file mode 100644
index 0000000000..7ca8953428
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+#define MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "modules/include/module_common_types.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/decoding_state.h"
+#include "modules/video_coding/event_wrapper.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// forward declarations
+class Clock;
+class VCMFrameBuffer;
+class VCMPacket;
+class VCMEncodedFrame;
+
+typedef std::list<VCMFrameBuffer*> UnorderedFrameList;
+
+struct VCMJitterSample {
+ VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {}
+ uint32_t timestamp;
+ uint32_t frame_size;
+ int64_t latest_packet_time;
+};
+
+class TimestampLessThan {
+ public:
+ bool operator()(uint32_t timestamp1, uint32_t timestamp2) const {
+ return IsNewerTimestamp(timestamp2, timestamp1);
+ }
+};
+
+class FrameList
+ : public std::map<uint32_t, VCMFrameBuffer*, TimestampLessThan> {
+ public:
+ void InsertFrame(VCMFrameBuffer* frame);
+ VCMFrameBuffer* PopFrame(uint32_t timestamp);
+ VCMFrameBuffer* Front() const;
+ VCMFrameBuffer* Back() const;
+ int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
+ UnorderedFrameList* free_frames);
+ void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
+ UnorderedFrameList* free_frames);
+ void Reset(UnorderedFrameList* free_frames);
+};
+
+class VCMJitterBuffer {
+ public:
+ VCMJitterBuffer(Clock* clock,
+ std::unique_ptr<EventWrapper> event,
+ const FieldTrialsView& field_trials);
+
+ ~VCMJitterBuffer();
+
+ VCMJitterBuffer(const VCMJitterBuffer&) = delete;
+ VCMJitterBuffer& operator=(const VCMJitterBuffer&) = delete;
+
+ // Initializes and starts jitter buffer.
+ void Start();
+
+ // Signals all internal events and stops the jitter buffer.
+ void Stop();
+
+ // Returns true if the jitter buffer is running.
+ bool Running() const;
+
+ // Empty the jitter buffer of all its data.
+ void Flush();
+
+ // Gets number of packets received.
+ int num_packets() const;
+
+ // Gets number of duplicated packets received.
+ int num_duplicated_packets() const;
+
+ // Wait `max_wait_time_ms` for a complete frame to arrive.
+ // If found, a pointer to the frame is returned. Returns nullptr otherwise.
+ VCMEncodedFrame* NextCompleteFrame(uint32_t max_wait_time_ms);
+
+ // Extract frame corresponding to input timestamp.
+ // Frame will be set to a decoding state.
+ VCMEncodedFrame* ExtractAndSetDecode(uint32_t timestamp);
+
+ // Releases a frame returned from the jitter buffer, should be called when
+ // done with decoding.
+ void ReleaseFrame(VCMEncodedFrame* frame);
+
+ // Returns the time in ms when the latest packet was inserted into the frame.
+ // Retransmitted is set to true if any of the packets belonging to the frame
+ // has been retransmitted.
+ int64_t LastPacketTime(const VCMEncodedFrame* frame,
+ bool* retransmitted) const;
+
+ // Inserts a packet into a frame returned from GetFrame().
+ // If the return value is <= 0, `frame` is invalidated and the pointer must
+ // be dropped after this function returns.
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);
+
+ // Returns the estimated jitter in milliseconds.
+ uint32_t EstimatedJitterMs();
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+
+ // Returns a list of the sequence numbers currently missing.
+ std::vector<uint16_t> GetNackList(bool* request_key_frame);
+
+ private:
+ class SequenceNumberLessThan {
+ public:
+ bool operator()(const uint16_t& sequence_number1,
+ const uint16_t& sequence_number2) const {
+ return IsNewerSequenceNumber(sequence_number2, sequence_number1);
+ }
+ };
+ typedef std::set<uint16_t, SequenceNumberLessThan> SequenceNumberSet;
+
+ // Gets the frame assigned to the timestamp of the packet. May recycle
+ // existing frames if no free frames are available. Returns an error code if
+ // failing, or kNoError on success. `frame_list` contains which list the
+ // packet was in, or NULL if it was not in a FrameList (a new frame).
+ VCMFrameBufferEnum GetFrame(const VCMPacket& packet,
+ VCMFrameBuffer** frame,
+ FrameList** frame_list)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns true if `frame` is continuous in `decoding_state`, not taking
+ // decodable frames into account.
+ bool IsContinuousInState(const VCMFrameBuffer& frame,
+ const VCMDecodingState& decoding_state) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Returns true if `frame` is continuous in the `last_decoded_state_`, taking
+ // all decodable frames into account.
+ bool IsContinuous(const VCMFrameBuffer& frame) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Looks for frames in `incomplete_frames_` which are continuous in the
+ // provided `decoded_state`. Starts the search from the timestamp of
+ // `decoded_state`.
+ void FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& decoded_state)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Looks for frames in `incomplete_frames_` which are continuous in
+ // `last_decoded_state_` taking all decodable frames into account. Starts
+ // the search from `new_frame`.
+ void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ VCMFrameBuffer* NextFrame() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Returns true if the NACK list was updated to cover sequence numbers up to
+ // `sequence_number`. If false, a key frame is needed to get into a state
+ // where we can continue decoding.
+ bool UpdateNackList(uint16_t sequence_number)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ bool TooLargeNackList() const;
+ // Returns true if the NACK list was reduced without problems. If false, a
+ // key frame is needed to get into a state where we can continue decoding.
+ bool HandleTooLargeNackList() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ bool MissingTooOldPacket(uint16_t latest_sequence_number) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Returns true if packets that are too old were successfully removed from
+ // the NACK list. If false, a key frame is needed to get into a state where
+ // we can continue decoding.
+ bool HandleTooOldPackets(uint16_t latest_sequence_number)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ // Drops all packets in the NACK list up until `last_decoded_sequence_number`.
+ void DropPacketsFromNackList(uint16_t last_decoded_sequence_number);
+
+ // Gets an empty frame, creating a new frame if necessary (i.e. increases
+ // jitter buffer size).
+ VCMFrameBuffer* GetEmptyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Attempts to increase the size of the jitter buffer. Returns true on
+ // success, false otherwise.
+ bool TryToIncreaseJitterBufferSize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Recycles oldest frames until a key frame is found. Used if jitter buffer is
+ // completely full. Returns true if a key frame was found.
+ bool RecycleFramesUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Updates the rolling average of packets per frame.
+ void UpdateAveragePacketsPerFrame(int current_number_packets_);
+
+ // Removes old/empty frames from the jitter buffer's frame lists.
+ // Should only be called prior to actual use.
+ void CleanUpOldOrEmptyFrames() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // Returns true if `packet` is likely to have been retransmitted.
+ bool IsPacketRetransmitted(const VCMPacket& packet) const;
+
+ // The following three functions update the jitter estimate with the
+ // payload size, receive time and RTP timestamp of a frame.
+ void UpdateJitterEstimate(const VCMJitterSample& sample,
+ bool incomplete_frame);
+ void UpdateJitterEstimate(const VCMFrameBuffer& frame, bool incomplete_frame);
+ void UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool incomplete_frame);
+
+ int NonContinuousOrIncompleteDuration() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ uint16_t EstimatedLowSequenceNumber(const VCMFrameBuffer& frame) const;
+
+ // Resets a frame buffer and returns it to `free_frames_`.
+ void RecycleFrameBuffer(VCMFrameBuffer* frame)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* clock_;
+ // If we are running (have started) or not.
+ bool running_;
+ mutable Mutex mutex_;
+ // Event to signal when we have a frame ready for decoder.
+ std::unique_ptr<EventWrapper> frame_event_;
+ // Number of allocated frames.
+ int max_number_of_frames_;
+ UnorderedFrameList free_frames_ RTC_GUARDED_BY(mutex_);
+ FrameList decodable_frames_ RTC_GUARDED_BY(mutex_);
+ FrameList incomplete_frames_ RTC_GUARDED_BY(mutex_);
+ VCMDecodingState last_decoded_state_ RTC_GUARDED_BY(mutex_);
+ bool first_packet_since_reset_;
+
+ // Number of packets in a row that have been too old.
+ int num_consecutive_old_packets_;
+ // Number of packets received.
+ int num_packets_ RTC_GUARDED_BY(mutex_);
+ // Number of duplicated packets received.
+ int num_duplicated_packets_ RTC_GUARDED_BY(mutex_);
+
+ // Jitter estimation.
+ // Filter for estimating jitter.
+ JitterEstimator jitter_estimate_;
+ // Calculates network delays used for jitter calculations.
+ InterFrameDelay inter_frame_delay_;
+ VCMJitterSample waiting_for_completion_;
+
+ // Holds the internal NACK list (the missing sequence numbers).
+ SequenceNumberSet missing_sequence_numbers_;
+ uint16_t latest_received_sequence_number_;
+ size_t max_nack_list_size_;
+ int max_packet_age_to_nack_; // Measured in sequence numbers.
+ int max_incomplete_time_ms_;
+
+ // Estimated rolling average of packets per frame.
+ float average_packets_per_frame_;
+ // Number of frames received; `average_packets_per_frame_` converges fast
+ // while this count is below kFastConvergeThreshold.
+ int frame_counter_;
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_JITTER_BUFFER_H_
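For orientation, the public API declared above is driven roughly as in the sketch below: packets go in through InsertPacket(), complete frames come out through NextCompleteFrame() and ExtractAndSetDecode(), and the NACK list is polled for RTCP feedback. This is a minimal illustration against the declarations in this header only; `clock`, `field_trials` and the MakePacket() helper are assumed stand-ins, not part of this patch.

    #include <vector>

    #include "absl/memory/memory.h"
    #include "modules/video_coding/jitter_buffer.h"

    webrtc::VCMPacket MakePacket();  // Hypothetical packet source.

    void ReceiveLoop(webrtc::Clock* clock,
                     const webrtc::FieldTrialsView& field_trials) {
      webrtc::VCMJitterBuffer jb(
          clock, absl::WrapUnique(webrtc::EventWrapper::Create()),
          field_trials);
      jb.Start();
      jb.SetNackSettings(/*max_nack_list_size=*/250,
                         /*max_packet_age_to_nack=*/450,
                         /*max_incomplete_time_ms=*/0);
      while (jb.Running()) {
        bool retransmitted = false;
        jb.InsertPacket(MakePacket(), &retransmitted);
        // Wait up to 10 ms for a frame that has all of its packets.
        if (webrtc::VCMEncodedFrame* found = jb.NextCompleteFrame(10)) {
          webrtc::VCMEncodedFrame* frame =
              jb.ExtractAndSetDecode(found->Timestamp());
          // ... hand `frame` to the decoder ...
          jb.ReleaseFrame(frame);  // Must be called when done decoding.
        }
        bool request_key_frame = false;
        std::vector<uint16_t> nacks = jb.GetNackList(&request_key_frame);
        // ... schedule `nacks` / a key frame request over RTCP ...
      }
      jb.Stop();
    }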
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h b/third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h
new file mode 100644
index 0000000000..6ccfe39199
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer_common.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
+#define MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
+
+namespace webrtc {
+
+// Used to estimate rolling average of packets per frame.
+static const float kFastConvergeMultiplier = 0.4f;
+static const float kNormalConvergeMultiplier = 0.2f;
+
+enum { kMaxNumberOfFrames = 300 };
+enum { kStartNumberOfFrames = 6 };
+enum { kMaxVideoDelayMs = 10000 };
+enum { kPacketsPerFrameMultiplier = 5 };
+enum { kFastConvergeThreshold = 5 };
+
+enum VCMJitterBufferEnum {
+ kMaxConsecutiveOldFrames = 60,
+ kMaxConsecutiveOldPackets = 300,
+ // TODO(sprang): Reduce this limit once codecs don't sometimes wildly
+ // overshoot bitrate target.
+ kMaxPacketsInSession = 1400, // Allows ~2MB frames.
+ kBufferIncStepSizeBytes = 30000, // >20 packets.
+ kMaxJBFrameSizeBytes = 4000000 // Sanity limit: don't go above 4 MB.
+};
+
+enum VCMFrameBufferEnum {
+ kOutOfBoundsPacket = -7,
+ kNotInitialized = -6,
+ kOldPacket = -5,
+ kGeneralError = -4,
+ kFlushIndicator = -3, // Indicator that a flush has occurred.
+ kTimeStampError = -2,
+ kSizeError = -1,
+ kNoError = 0,
+ kIncomplete = 1, // Frame incomplete.
+ kCompleteSession = 3, // At least one layer in the frame is complete.
+ kDuplicatePacket = 5 // We're receiving a duplicate packet.
+};
+
+enum VCMFrameBufferStateEnum {
+ kStateEmpty, // Frame popped by the RTP receiver.
+ kStateIncomplete, // Frame that has one or more packets stored.
+ kStateComplete, // Frame that has all of its packets.
+};
+
+enum { kH264StartCodeLengthBytes = 4 };
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
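The VCMFrameBufferEnum values above are what VCMJitterBuffer::InsertPacket() hands back to its caller. As a rough guide to their intended interpretation, here is a sketch (not code from this patch; `jb` and `packet` are assumed to exist):

    #include "modules/video_coding/jitter_buffer.h"

    void HandleInsertResult(webrtc::VCMJitterBuffer& jb,
                            const webrtc::VCMPacket& packet) {
      bool retransmitted = false;
      switch (jb.InsertPacket(packet, &retransmitted)) {
        case webrtc::kCompleteSession:
          break;  // At least one layer complete; a frame may be decodable now.
        case webrtc::kIncomplete:
          break;  // The frame is still missing packets; keep feeding.
        case webrtc::kDuplicatePacket:
          break;  // Counted in num_duplicated_packets(), then dropped.
        case webrtc::kFlushIndicator:
          break;  // Frames were recycled; wait for the next key frame.
        default:
          break;  // kNoError, or a negative rejection code such as
                  // kOldPacket or kSizeError.
      }
    }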
diff --git a/third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc
new file mode 100644
index 0000000000..cc791fe110
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/jitter_buffer_unittest.cc
@@ -0,0 +1,1848 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/jitter_buffer.h"
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/media_opt_util.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/test/stream_generator.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+class TestBasicJitterBuffer : public ::testing::Test {
+ protected:
+ TestBasicJitterBuffer() {}
+ void SetUp() override {
+ clock_.reset(new SimulatedClock(0));
+ jitter_buffer_.reset(new VCMJitterBuffer(
+ clock_.get(), absl::WrapUnique(EventWrapper::Create()), field_trials_));
+ jitter_buffer_->Start();
+ seq_num_ = 1234;
+ timestamp_ = 0;
+ size_ = 1400;
+ // Data vector - 0, 0, 0x80, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0x80, 3....
+ data_[0] = 0;
+ data_[1] = 0;
+ data_[2] = 0x80;
+ int count = 3;
+ for (unsigned int i = 3; i < sizeof(data_) - 3; ++i) {
+ data_[i] = count;
+ count++;
+ if (count == 10) {
+ data_[i + 1] = 0;
+ data_[i + 2] = 0;
+ data_[i + 3] = 0x80;
+ count = 3;
+ i += 3;
+ }
+ }
+ RTPHeader rtp_header;
+ RTPVideoHeader video_header;
+ rtp_header.sequenceNumber = seq_num_;
+ rtp_header.timestamp = timestamp_;
+ rtp_header.markerBit = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.is_first_packet_in_frame = true;
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header,
+ /*ntp_time_ms=*/0, clock_->CurrentTime()));
+ }
+
+ VCMEncodedFrame* DecodeCompleteFrame() {
+ VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
+ if (!found_frame)
+ return nullptr;
+ return jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
+ }
+
+ void CheckOutFrame(VCMEncodedFrame* frame_out,
+ unsigned int size,
+ bool startCode) {
+ ASSERT_TRUE(frame_out);
+
+ const uint8_t* outData = frame_out->data();
+ unsigned int i = 0;
+
+ if (startCode) {
+ EXPECT_EQ(0, outData[0]);
+ EXPECT_EQ(0, outData[1]);
+ EXPECT_EQ(0, outData[2]);
+ EXPECT_EQ(1, outData[3]);
+ i += 4;
+ }
+
+ EXPECT_EQ(size, frame_out->size());
+ int count = 3;
+ for (; i < size; i++) {
+ if (outData[i] == 0 && outData[i + 1] == 0 && outData[i + 2] == 0x80) {
+ i += 2;
+ } else if (startCode && outData[i] == 0 && outData[i + 1] == 0) {
+ EXPECT_EQ(0, outData[0]);
+ EXPECT_EQ(0, outData[1]);
+ EXPECT_EQ(0, outData[2]);
+ EXPECT_EQ(1, outData[3]);
+ i += 3;
+ } else {
+ EXPECT_EQ(count, outData[i]);
+ count++;
+ if (count == 10) {
+ count = 3;
+ }
+ }
+ }
+ }
+
+ uint16_t seq_num_;
+ uint32_t timestamp_;
+ int size_;
+ uint8_t data_[1500];
+ test::ScopedKeyValueConfig field_trials_;
+ std::unique_ptr<VCMPacket> packet_;
+ std::unique_ptr<SimulatedClock> clock_;
+ std::unique_ptr<VCMJitterBuffer> jitter_buffer_;
+};
+
+class TestRunningJitterBuffer : public ::testing::Test {
+ protected:
+ enum { kDataBufferSize = 10 };
+
+ virtual void SetUp() {
+ clock_.reset(new SimulatedClock(0));
+ max_nack_list_size_ = 150;
+ oldest_packet_to_nack_ = 250;
+ jitter_buffer_ = new VCMJitterBuffer(
+ clock_.get(), absl::WrapUnique(EventWrapper::Create()), field_trials_);
+ stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
+ jitter_buffer_->Start();
+ jitter_buffer_->SetNackSettings(max_nack_list_size_, oldest_packet_to_nack_,
+ 0);
+ memset(data_buffer_, 0, kDataBufferSize);
+ }
+
+ virtual void TearDown() {
+ jitter_buffer_->Stop();
+ delete stream_generator_;
+ delete jitter_buffer_;
+ }
+
+ VCMFrameBufferEnum InsertPacketAndPop(int index) {
+ VCMPacket packet;
+ packet.dataPtr = data_buffer_;
+ bool packet_available = stream_generator_->PopPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ bool retransmitted = false;
+ return jitter_buffer_->InsertPacket(packet, &retransmitted);
+ }
+
+ VCMFrameBufferEnum InsertPacket(int index) {
+ VCMPacket packet;
+ packet.dataPtr = data_buffer_;
+ bool packet_available = stream_generator_->GetPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ bool retransmitted = false;
+ return jitter_buffer_->InsertPacket(packet, &retransmitted);
+ }
+
+ VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) {
+ stream_generator_->GenerateFrame(
+ frame_type, (frame_type != VideoFrameType::kEmptyFrame) ? 1 : 0,
+ (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+ clock_->TimeInMilliseconds());
+ VCMFrameBufferEnum ret = InsertPacketAndPop(0);
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ return ret;
+ }
+
+ VCMFrameBufferEnum InsertFrames(int num_frames, VideoFrameType frame_type) {
+ VCMFrameBufferEnum ret_for_all = kNoError;
+ for (int i = 0; i < num_frames; ++i) {
+ VCMFrameBufferEnum ret = InsertFrame(frame_type);
+ if (ret < kNoError) {
+ ret_for_all = ret;
+ } else if (ret_for_all >= kNoError) {
+ ret_for_all = ret;
+ }
+ }
+ return ret_for_all;
+ }
+
+ void DropFrame(int num_packets) {
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta,
+ num_packets, 0,
+ clock_->TimeInMilliseconds());
+ for (int i = 0; i < num_packets; ++i)
+ stream_generator_->DropLastPacket();
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ }
+
+ bool DecodeCompleteFrame() {
+ VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(0);
+ if (!found_frame)
+ return false;
+
+ VCMEncodedFrame* frame =
+ jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
+ bool ret = (frame != NULL);
+ jitter_buffer_->ReleaseFrame(frame);
+ return ret;
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ VCMJitterBuffer* jitter_buffer_;
+ StreamGenerator* stream_generator_;
+ std::unique_ptr<SimulatedClock> clock_;
+ size_t max_nack_list_size_;
+ int oldest_packet_to_nack_;
+ uint8_t data_buffer_[kDataBufferSize];
+};
+
+class TestJitterBufferNack : public TestRunningJitterBuffer {
+ protected:
+ TestJitterBufferNack() {}
+ virtual void SetUp() { TestRunningJitterBuffer::SetUp(); }
+
+ virtual void TearDown() { TestRunningJitterBuffer::TearDown(); }
+};
+
+TEST_F(TestBasicJitterBuffer, StopRunning) {
+ jitter_buffer_->Stop();
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+ jitter_buffer_->Start();
+
+ // No packets inserted.
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+}
+
+TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
+ // Always start with a complete key frame when not allowing errors.
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->timestamp += 123 * 90;
+
+ // Insert the packet to the jitter buffer and get a frame.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ ++seq_num_;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert last packet.
+ ++seq_num_;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
+ // Always start with a complete key frame.
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_FALSE(frame_out == NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->markerBit = false;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->timestamp += 33 * 90;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ packet_->video_header.is_first_packet_in_frame = false;
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+
+ // Insert a packet into a frame.
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert the last packet.
+ ++seq_num_;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
+ // Insert the "first" packet last.
+ seq_num_ += 100;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert the last packet.
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ // Check that we fail to get a frame since the sequence numbers are not
+ // continuous.
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_ -= 3;
+ timestamp_ -= 33 * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ // It should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+
+ // Send in an initial good packet/frame (Frame A) to start things off.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Now send in a complete delta frame (Frame C), but with a sequence number
+ // gap. No pic index either, so no temporal scalability cheating :)
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ // Leave a gap of 2 sequence numbers and two frames.
+ packet_->seqNum = seq_num_ + 3;
+ packet_->timestamp = timestamp_ + (66 * 90);
+ // Still isFirst = marker = true.
+ // Session should be complete (frame is complete), but there's nothing to
+ // decode yet.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Now send in a complete delta frame (Frame B) that is continuous from A, but
+ // doesn't fill the full gap to C. The rest of the gap is going to be padding.
+ packet_->seqNum = seq_num_ + 1;
+ packet_->timestamp = timestamp_ + (33 * 90);
+ // Still isFirst = marker = true.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // But Frame C isn't continuous yet.
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Add in the padding. These are empty packets (data length is 0) with no
+ // marker bit and matching the timestamp of Frame B.
+ RTPHeader rtp_header;
+ RTPVideoHeader video_header;
+ rtp_header.sequenceNumber = seq_num_ + 2;
+ rtp_header.timestamp = timestamp_ + (33 * 90);
+ rtp_header.markerBit = false;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ VCMPacket empty_packet(data_, 0, rtp_header, video_header,
+ /*ntp_time_ms=*/0, clock_->CurrentTime());
+ EXPECT_EQ(kOldPacket,
+ jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
+ empty_packet.seqNum += 1;
+ EXPECT_EQ(kOldPacket,
+ jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
+
+ // But now Frame C should be ready!
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(0, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+ EXPECT_EQ(1, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ // Insert the same packet again (a duplicate).
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_EQ(2, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+
+ seq_num_++;
+ packet_->seqNum = seq_num_;
+ packet_->markerBit = true;
+ packet_->video_header.is_first_packet_in_frame = false;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, 2 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(3, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(0, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ bool retransmitted = false;
+ // Insert first complete frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Insert 3 delta frames.
+ for (uint16_t i = 1; i <= 3; ++i) {
+ packet_->seqNum = seq_num_ + i;
+ packet_->timestamp = timestamp_ + (i * 33) * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+ }
+
+ // Retransmit second delta frame.
+ packet_->seqNum = seq_num_ + 2;
+ packet_->timestamp = timestamp_ + 66 * 90;
+
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ EXPECT_EQ(5, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+
+ // Should be able to decode 3 delta frames, key frame already decoded.
+ for (size_t i = 0; i < 3; ++i) {
+ frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+ }
+}
+
+TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
+ // Verify that JB skips forward to next base layer frame.
+ // -------------------------------------------------
+ // | 65485 | 65486 | 65487 | 65488 | 65489 | ...
+ // | pid:5 | pid:6 | pid:7 | pid:8 | pid:9 | ...
+ // | tid:0 | tid:2 | tid:1 | tid:2 | tid:0 | ...
+ // | ss | x | x | x | |
+ // -------------------------------------------------
+ // |<----------tl0idx:200--------->|<---tl0idx:201---
+
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ auto& vp9_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+
+ bool re = false;
+ packet_->video_header.codec = kVideoCodecVP9;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ vp9_header.flexible_mode = false;
+ vp9_header.spatial_idx = 0;
+ vp9_header.beginning_of_frame = true;
+ vp9_header.end_of_frame = true;
+ vp9_header.temporal_up_switch = false;
+
+ packet_->seqNum = 65485;
+ packet_->timestamp = 1000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_header.picture_id = 5;
+ vp9_header.tl0_pic_idx = 200;
+ vp9_header.temporal_idx = 0;
+ vp9_header.ss_data_available = true;
+ vp9_header.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert next temporal layer 0.
+ packet_->seqNum = 65489;
+ packet_->timestamp = 13000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.picture_id = 9;
+ vp9_header.tl0_pic_idx = 201;
+ vp9_header.temporal_idx = 0;
+ vp9_header.ss_data_available = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(1000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(13000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
+ // Verify that frames are updated with SS data when SS packet is reordered.
+ // --------------------------------
+ // | 65486 | 65487 | 65485 |...
+ // | pid:6 | pid:7 | pid:5 |...
+ // | tid:2 | tid:1 | tid:0 |...
+ // | | | ss |
+ // --------------------------------
+ // |<--------tl0idx:200--------->|
+
+ auto& vp9_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+
+ bool re = false;
+ packet_->video_header.codec = kVideoCodecVP9;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ vp9_header.flexible_mode = false;
+ vp9_header.spatial_idx = 0;
+ vp9_header.beginning_of_frame = true;
+ vp9_header.end_of_frame = true;
+ vp9_header.tl0_pic_idx = 200;
+
+ packet_->seqNum = 65486;
+ packet_->timestamp = 6000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.picture_id = 6;
+ vp9_header.temporal_idx = 2;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->seqNum = 65487;
+ packet_->timestamp = 9000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.picture_id = 7;
+ vp9_header.temporal_idx = 1;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert first frame with SS data.
+ packet_->seqNum = 65485;
+ packet_->timestamp = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.width = 352;
+ packet_->video_header.height = 288;
+ vp9_header.picture_id = 5;
+ vp9_header.temporal_idx = 0;
+ vp9_header.temporal_up_switch = false;
+ vp9_header.ss_data_available = true;
+ vp9_header.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_FALSE(
+ frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(6000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(9000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
+ // Verify that frames are updated with SS data when SS packet is reordered.
+ // -----------------------------------------
+ // | 65486 | 65487 | 65485 | 65484 |...
+ // | pid:6 | pid:6 | pid:5 | pid:5 |...
+ // | tid:1 | tid:1 | tid:0 | tid:0 |...
+ // | sid:0 | sid:1 | sid:1 | sid:0 |...
+ // | t:6000 | t:6000 | t:3000 | t:3000 |
+ // | | | | ss |
+ // -----------------------------------------
+ // |<-----------tl0idx:200------------>|
+
+ auto& vp9_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderVP9>();
+
+ bool re = false;
+ packet_->video_header.codec = kVideoCodecVP9;
+ vp9_header.flexible_mode = false;
+ vp9_header.beginning_of_frame = true;
+ vp9_header.end_of_frame = true;
+ vp9_header.tl0_pic_idx = 200;
+
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = 65486;
+ packet_->timestamp = 6000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.spatial_idx = 0;
+ vp9_header.picture_id = 6;
+ vp9_header.temporal_idx = 1;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = 65487;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ vp9_header.spatial_idx = 1;
+ vp9_header.picture_id = 6;
+ vp9_header.temporal_idx = 1;
+ vp9_header.temporal_up_switch = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = 65485;
+ packet_->timestamp = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ vp9_header.spatial_idx = 1;
+ vp9_header.picture_id = 5;
+ vp9_header.temporal_idx = 0;
+ vp9_header.temporal_up_switch = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert first frame with SS data.
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = 65484;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.width = 352;
+ packet_->video_header.height = 288;
+ vp9_header.spatial_idx = 0;
+ vp9_header.picture_id = 5;
+ vp9_header.temporal_idx = 0;
+ vp9_header.temporal_up_switch = false;
+ vp9_header.ss_data_available = true;
+ vp9_header.gof.SetGofInfoVP9(
+ kTemporalStructureMode2); // kTemporalStructureMode2: 0-1-0-1..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_FALSE(
+ frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(6000U, frame_out->Timestamp());
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->insertStartCode = true;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
+ auto& h264_header =
+ packet_->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ packet_->timestamp = timestamp_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kIdr;
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus[0].sps_id = -1;
+ h264_header.nalus[0].pps_id = 0;
+ h264_header.nalus_length = 1;
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ // Not decodable since sps and pps are missing.
+ EXPECT_EQ(nullptr, DecodeCompleteFrame());
+
+ timestamp_ += 3000;
+ packet_->timestamp = timestamp_;
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kStapA;
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[0].sps_id = 0;
+ h264_header.nalus[0].pps_id = -1;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[1].sps_id = 0;
+ h264_header.nalus[1].pps_id = 0;
+ h264_header.nalus_length = 2;
+ // Not complete since the marker bit hasn't been received.
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kIdr;
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus[0].sps_id = -1;
+ h264_header.nalus[0].pps_id = 0;
+ h264_header.nalus_length = 1;
+ // Complete and decodable since the pps and sps are received in the first
+ // packet of this frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ ASSERT_NE(nullptr, frame_out);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ timestamp_ += 3000;
+ packet_->timestamp = timestamp_;
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->video_header.codec = kVideoCodecH264;
+ h264_header.nalu_type = H264::NaluType::kSlice;
+ h264_header.nalus[0].type = H264::NaluType::kSlice;
+ h264_header.nalus[0].sps_id = -1;
+ h264_header.nalus[0].pps_id = 0;
+ h264_header.nalus_length = 1;
+ // Complete and decodable since sps, pps and the key frame have been
+ // received.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ ASSERT_NE(nullptr, frame_out);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
+ seq_num_ = 0xfff0;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ int loop = 0;
+ do {
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ loop++;
+ } while (loop < 98);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
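The wrap tests here and below lean on the modular sequence-number ordering that the header's SequenceNumberLessThan comparator gets from IsNewerSequenceNumber(). A worked example of that ordering (the include path is assumed from upstream WebRTC):

    #include <cstdint>

    #include "modules/include/module_common_types_public.h"

    void SequenceWrapExample() {
      const uint16_t before_wrap = 0xfff0;  // 65520.
      const uint16_t after_wrap = 0x0005;   // 5, i.e. 65520 + 21 mod 2^16.
      // Interpreted modulo 2^16, 5 is "newer" than 65520: the forward
      // distance (21) is shorter than the backward one (65515).
      bool newer = webrtc::IsNewerSequenceNumber(after_wrap, before_wrap);
      (void)newer;  // true
    }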
+
+TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
+ // Insert the last packet of the frame (highest sequence number) first.
+ seq_num_ = 10;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ loop++;
+ } while (loop < 98);
+
+ // Insert last packet.
+ seq_num_--;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 3000 t = 2000
+ seq_num_ = 2;
+ timestamp_ = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->timestamp = timestamp_;
+ packet_->seqNum = seq_num_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000u, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_--;
+ timestamp_ = 2000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+}
+
+TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 3000 t = 0xffffff00
+
+ seq_num_ = 2;
+ timestamp_ = 3000;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(timestamp_, frame_out->Timestamp());
+
+ CheckOutFrame(frame_out, size_, false);
+
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_--;
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ // This timestamp is old.
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+}
+
+TEST_F(TestBasicJitterBuffer, TimestampWrap) {
+ // --------------- ---------------
+ // | 1 | 2 | | 3 | 4 |
+ // --------------- ---------------
+ // t = 0xffffff00 t = 33*90
+
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_++;
+ timestamp_ += 33 * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
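The `33 * 90` increments used throughout these tests are RTP ticks: video RTP timestamps run at 90 kHz, so one frame interval at roughly 30 fps is 33 ms x 90 ticks/ms = 2970 ticks, and the 32-bit timestamp wraps modulo 2^32. A small sketch of the arithmetic the wrap tests rely on:

    #include <cstdint>

    uint32_t NextFrameTimestamp(uint32_t rtp_timestamp) {
      // Unsigned overflow gives the wrap for free: starting from 0xffffff00
      // (4294967040), adding 2970 wraps around to 2714, which is why the
      // tests pick small post-wrap values like 2700 as "later" timestamps.
      return rtp_timestamp + 33 * 90;
    }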
+
+TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
+ // ------- -------
+ // | 1 | | 2 |
+ // ------- -------
+ // t = 0xffffff00 t = 2700
+
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ // Insert first frame (session will be complete).
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ // Insert next frame.
+ seq_num_++;
+ timestamp_ = 2700;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(0xffffff00, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
+ EXPECT_EQ(2700u, frame_out2->Timestamp());
+ CheckOutFrame(frame_out2, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out2);
+}
+
+TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 2700 t = 0xffffff00
+
+ seq_num_ = 2;
+ timestamp_ = 2700;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ // Insert the second frame.
+ seq_num_--;
+ timestamp_ = 0xffffff00;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(0xffffff00, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
+ EXPECT_EQ(2700u, frame_out2->Timestamp());
+ CheckOutFrame(frame_out2, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out2);
+}
+
+TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
+ int loop = 0;
+ bool retransmitted = false;
+ // Insert kMaxPacketsInSession packets into the frame.
+ do {
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ // Both the first and every subsequent packet leave the frame incomplete.
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ loop++;
+ } while (loop < kMaxPacketsInSession);
+
+ // Max number of packets inserted.
+ // Insert one more packet.
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ // Insert the packet -> frame recycled.
+ EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+}
+
+TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
+ // Fill the JB with more than the max number of frames (50 delta frames +
+ // 51 key frames) with a wrap in seq_num_.
+ //
+ // --------------------------------------------------------------
+ // | 65485 | 65486 | 65487 | .... | 65535 | 0 | 1 | 2 | .....| 50 |
+ // --------------------------------------------------------------
+ // |<-----------delta frames------------->|<------key frames----->|
+
+ // Make sure the jitter buffer doesn't request a key frame after too many
+ // non-decodable frames.
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+
+ int loop = 0;
+ seq_num_ = 65485;
+ uint32_t first_key_frame_timestamp = 0;
+ bool retransmitted = false;
+ // Insert kMaxNumberOfFrames frames.
+ do {
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ if (loop == 50) {
+ first_key_frame_timestamp = packet_->timestamp;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ }
+
+ // Insert frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ loop++;
+ } while (loop < kMaxNumberOfFrames);
+
+ // Max number of frames inserted.
+
+ // Insert one more frame.
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ // Now, no free frame - frames will be recycled until first key frame.
+ EXPECT_EQ(kFlushIndicator,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
+ seq_num_ = 3;
+ // Insert one empty packet per frame. The JB should never return the last
+ // timestamp inserted; empty frames are only returned in the presence of
+ // subsequent frames.
+ int maxSize = 1000;
+ bool retransmitted = false;
+ for (int i = 0; i < maxSize + 10; i++) {
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->video_header.frame_type = VideoFrameType::kEmptyFrame;
+
+ EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ }
+}
+
+TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
+ // Test that we cannot get incomplete frames from the JB if we haven't
+ // received the marker bit, unless we have received a packet from a later
+ // timestamp.
+ // Start with a complete key frame - insert and decode.
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet_->video_header.is_first_packet_in_frame = true;
+ packet_->markerBit = true;
+ bool retransmitted = false;
+
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ packet_->seqNum += 2;
+ packet_->timestamp += 33 * 90;
+ packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_->video_header.is_first_packet_in_frame = false;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ packet_->seqNum += 2;
+ packet_->timestamp += 33 * 90;
+ packet_->video_header.is_first_packet_in_frame = true;
+
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+}
+
+TEST_F(TestRunningJitterBuffer, Full) {
+ // Make sure the jitter buffer doesn't request a key frame after too many
+ // non-decodable frames.
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ DropFrame(1);
+ // Fill the jitter buffer.
+ EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ // Make sure we can't decode these frames.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ // This frame will make the jitter buffer recycle frames until a key frame.
+ // Since none is found, it will have to wait until the next key frame before
+ // decoding.
+ EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+}
+
+TEST_F(TestRunningJitterBuffer, EmptyPackets) {
+ // Make sure a frame can become complete even though empty packets are
+ // missing.
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 3,
+ clock_->TimeInMilliseconds());
+ bool request_key_frame = false;
+ // Insert empty packet.
+ EXPECT_EQ(kNoError, InsertPacketAndPop(4));
+ EXPECT_FALSE(request_key_frame);
+ // Insert 3 media packets.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ // Insert empty packet.
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestRunningJitterBuffer, SkipToKeyFrame) {
+ // Insert delta frames.
+ EXPECT_GE(InsertFrames(5, VideoFrameType::kVideoFrameDelta), kNoError);
+ // Can't decode without a key frame.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ // Skip to the next key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestRunningJitterBuffer, DontSkipToKeyFrameIfDecodable) {
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ const int kNumDeltaFrames = 5;
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ for (int i = 0; i < kNumDeltaFrames + 1; ++i) {
+ EXPECT_TRUE(DecodeCompleteFrame());
+ }
+}
+
+TEST_F(TestRunningJitterBuffer, KeyDeltaKeyDelta) {
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ const int kNumDeltaFrames = 5;
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ for (int i = 0; i < 2 * (kNumDeltaFrames + 1); ++i) {
+ EXPECT_TRUE(DecodeCompleteFrame());
+ }
+}
+
+TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_TRUE(DecodeCompleteFrame());
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, EmptyPackets) {
+  // Make sure empty packets don't clog the jitter buffer.
+ EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kEmptyFrame),
+ kNoError);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackTooOldPackets) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+  // Drop one frame, then insert more frames than the NACK history covers to
+  // trigger NACKing a too-old packet.
+ DropFrame(1);
+ // Insert a frame which should trigger a recycle until the next key frame.
+ EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
+ VideoFrameType::kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // No key frame will be requested since the jitter buffer is empty.
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(0u, nack_list.size());
+
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
+ // Waiting for a key frame.
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ // The next complete continuous frame isn't a key frame, but we're waiting
+ // for one.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ // Skipping ahead to the key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackLargeJitterBuffer) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+  // Insert a long run of delta frames without gaps; this should neither
+  // trigger a recycle nor produce any NACKs.
+ EXPECT_GE(
+ InsertFrames(oldest_packet_to_nack_, VideoFrameType::kVideoFrameDelta),
+ kNoError);
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // Verify that the jitter buffer does not request a key frame.
+ EXPECT_FALSE(request_key_frame);
+ // Verify that no packets are NACKed.
+ EXPECT_EQ(0u, nack_list.size());
+ // Verify that we can decode the next frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackListFull) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+  // Drop more frames than the NACK list can hold, overflowing it.
+ DropFrame(max_nack_list_size_ + 1);
+ // Insert a frame which should trigger a recycle until the next key frame.
+ EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ bool request_key_frame = false;
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // The jitter buffer is empty, so we won't request key frames until we get a
+ // packet.
+ EXPECT_FALSE(request_key_frame);
+
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
+  // Now we have a packet in the jitter buffer; since it's not a key frame, a
+  // key frame will be requested.
+  jitter_buffer_->GetNackList(&request_key_frame);
+  // With the NACK list overflowed and a delta frame waiting, a key frame
+  // request is signaled.
+ EXPECT_TRUE(request_key_frame);
+ // The next complete continuous frame isn't a key frame, but we're waiting
+ // for one.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ // Skipping ahead to the key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NoNackListReturnedBeforeFirstDecode) {
+ DropFrame(10);
+ // Insert a frame and try to generate a NACK list. Shouldn't get one.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // No list generated, and a key frame request is signaled.
+ EXPECT_EQ(0u, nack_list.size());
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
+ clock_->TimeInMilliseconds());
+ stream_generator_->NextPacket(NULL); // Drop packet.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_TRUE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(1u, nack_list.size());
+}
+
+TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds());
+ VCMPacket packet;
+ stream_generator_->PopPacket(&packet, 0);
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_FALSE(retransmitted);
+ // Drop second packet.
+ stream_generator_->PopPacket(&packet, 1);
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_FALSE(retransmitted);
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ uint16_t seq_num;
+ EXPECT_EQ(1u, nack_list.size());
+ seq_num = nack_list[0];
+ stream_generator_->PopPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, seq_num);
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_TRUE(retransmitted);
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ uint16_t seq_num;
+ ASSERT_EQ(1u, nack_list.size());
+ seq_num = nack_list[0];
+ VCMPacket packet;
+ stream_generator_->GetPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, seq_num);
+}
+
+TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrameSecondInQueue) {
+ VCMPacket packet;
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ // First frame is delta.
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 3, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet in frame.
+ ASSERT_TRUE(stream_generator_->PopPacket(&packet, 0));
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Second frame is key.
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds() + 10);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet in frame.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ uint16_t seq_num;
+ ASSERT_EQ(1u, nack_list.size());
+ seq_num = nack_list[0];
+ stream_generator_->GetPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, seq_num);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperation) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+ // ----------------------------------------------------------------
+ // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
+ // ----------------------------------------------------------------
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 100, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Verify that the frame is incomplete.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ while (stream_generator_->PacketsRemaining() > 1) {
+ if (stream_generator_->NextSequenceNumber() % 10 != 0) {
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ }
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_EQ(0, stream_generator_->PacketsRemaining());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool request_key_frame = false;
+
+ // Verify the NACK list.
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ const size_t kExpectedNackSize = 9;
+ ASSERT_EQ(kExpectedNackSize, nack_list.size());
+ for (size_t i = 0; i < nack_list.size(); ++i)
+ EXPECT_EQ((1 + i) * 10, nack_list[i]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperationWrap) {
+ bool request_key_frame = false;
+ // ------- ------------------------------------------------------------
+ // | 65532 | | 65533 | 65534 | 65535 | x | 1 | .. | 9 | x | 11 |.....| 96 |
+ // ------- ------------------------------------------------------------
+ stream_generator_->Init(65532, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 100, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ while (stream_generator_->PacketsRemaining() > 1) {
+ if (stream_generator_->NextSequenceNumber() % 10 != 0) {
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ }
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(0, stream_generator_->PacketsRemaining());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ // Verify the NACK list.
+ const size_t kExpectedNackSize = 10;
+ ASSERT_EQ(kExpectedNackSize, nack_list.size());
+ for (size_t i = 0; i < nack_list.size(); ++i)
+ EXPECT_EQ(i * 10, nack_list[i]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
+ bool request_key_frame = false;
+ // -----------------------------------
+ // | 65532 | 65533 | 65534 | x | 0 | 1 |
+ // -----------------------------------
+ stream_generator_->Init(65532, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ for (int i = 0; i < 5; ++i) {
+ if (stream_generator_->NextSequenceNumber() != 65535) {
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ }
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ // Verify the NACK list.
+ ASSERT_EQ(1u, nack_list.size());
+ EXPECT_EQ(65535, nack_list[0]);
+}
+
+TEST_F(TestJitterBufferNack, ResetByFutureKeyFrameDoesntError) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+
+  // Far-into-the-future video frame, which could be caused by resetting the
+  // encoder or otherwise restarting. This should not fail with an error when
+  // the packet is a key frame, even if the entire NACK list needs to be
+  // flushed.
+ stream_generator_->Init(10000, clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ InsertFrame(VideoFrameType::kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+
+ // Stream should be decodable from this point.
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ InsertFrame(VideoFrameType::kVideoFrameDelta);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc
new file mode 100644
index 0000000000..3377ab5a76
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/loss_notification_controller.h"
+
+#include <stdint.h>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+namespace {
+// Keep a container's size no higher than `max_allowed_size`, by paring its size
+// down to `target_size` whenever it has more than `max_allowed_size` elements.
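+// E.g., with max_allowed_size = 5 and target_size = 3, a container holding
+// {1, 2, 3, 4, 5, 6, 7} is pared down to its three newest entries, {5, 6, 7}.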
+template <typename Container>
+void PareDown(Container* container,
+ size_t max_allowed_size,
+ size_t target_size) {
+ if (container->size() > max_allowed_size) {
+ const size_t entries_to_delete = container->size() - target_size;
+ auto erase_to = container->begin();
+ std::advance(erase_to, entries_to_delete);
+ container->erase(container->begin(), erase_to);
+ RTC_DCHECK_EQ(container->size(), target_size);
+ }
+}
+} // namespace
+
+LossNotificationController::LossNotificationController(
+ KeyFrameRequestSender* key_frame_request_sender,
+ LossNotificationSender* loss_notification_sender)
+ : key_frame_request_sender_(key_frame_request_sender),
+ loss_notification_sender_(loss_notification_sender),
+ current_frame_potentially_decodable_(true) {
+ RTC_DCHECK(key_frame_request_sender_);
+ RTC_DCHECK(loss_notification_sender_);
+}
+
+LossNotificationController::~LossNotificationController() = default;
+
+void LossNotificationController::OnReceivedPacket(
+ uint16_t rtp_seq_num,
+ const LossNotificationController::FrameDetails* frame) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // Ignore repeated or reordered packets.
+ // TODO(bugs.webrtc.org/10336): Handle packet reordering.
+ if (last_received_seq_num_ &&
+ !AheadOf(rtp_seq_num, *last_received_seq_num_)) {
+ return;
+ }
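+  // Note: AheadOf() compares sequence numbers modulo 2^16, so e.g.
+  // AheadOf(0, 65535) is true; a plain `<=` comparison would misclassify
+  // packets that arrive across a sequence-number wrap.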
+
+ DiscardOldInformation(); // Prevent memory overconsumption.
+
+ const bool seq_num_gap =
+ last_received_seq_num_ &&
+ rtp_seq_num != static_cast<uint16_t>(*last_received_seq_num_ + 1u);
+
+ last_received_seq_num_ = rtp_seq_num;
+
+ // `frame` is not nullptr iff the packet is the first packet in the frame.
+ if (frame != nullptr) {
+ // Ignore repeated or reordered frames.
+ // TODO(bugs.webrtc.org/10336): Handle frame reordering.
+ if (last_received_frame_id_.has_value() &&
+ frame->frame_id <= last_received_frame_id_.value()) {
+ RTC_LOG(LS_WARNING) << "Repeated or reordered frame ID ("
+ << frame->frame_id << ").";
+ return;
+ }
+
+ last_received_frame_id_ = frame->frame_id;
+
+ if (frame->is_keyframe) {
+ // Subsequent frames may not rely on frames before the key frame.
+ // Note that upon receiving a key frame, we do not issue a loss
+ // notification on RTP sequence number gap, unless that gap spanned
+ // the key frame itself. This is because any loss which occurred before
+ // the key frame is no longer relevant.
+ decodable_frame_ids_.clear();
+ current_frame_potentially_decodable_ = true;
+ } else {
+ const bool all_dependencies_decodable =
+ AllDependenciesDecodable(frame->frame_dependencies);
+ current_frame_potentially_decodable_ = all_dependencies_decodable;
+ if (seq_num_gap || !current_frame_potentially_decodable_) {
+ HandleLoss(rtp_seq_num, current_frame_potentially_decodable_);
+ }
+ }
+ } else if (seq_num_gap || !current_frame_potentially_decodable_) {
+ current_frame_potentially_decodable_ = false;
+ // We allow sending multiple loss notifications for a single frame
+ // even if only one of its packets is lost. We do this because the bigger
+ // the frame, the more likely it is to be non-discardable, and therefore
+ // the more robust we wish to be to loss of the feedback messages.
+ HandleLoss(rtp_seq_num, false);
+ }
+}
+
+void LossNotificationController::OnAssembledFrame(
+ uint16_t first_seq_num,
+ int64_t frame_id,
+ bool discardable,
+ rtc::ArrayView<const int64_t> frame_dependencies) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ DiscardOldInformation(); // Prevent memory overconsumption.
+
+ if (discardable) {
+ return;
+ }
+
+ if (!AllDependenciesDecodable(frame_dependencies)) {
+ return;
+ }
+
+ last_decodable_non_discardable_.emplace(first_seq_num);
+ const auto it = decodable_frame_ids_.insert(frame_id);
+ RTC_DCHECK(it.second);
+}
+
+void LossNotificationController::DiscardOldInformation() {
+ constexpr size_t kExpectedKeyFrameIntervalFrames = 3000;
+ constexpr size_t kMaxSize = 2 * kExpectedKeyFrameIntervalFrames;
+ constexpr size_t kTargetSize = kExpectedKeyFrameIntervalFrames;
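+  // With these values the set may grow to 6000 frame IDs before being pared
+  // back to the newest 3000 (roughly one expected key frame interval).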
+ PareDown(&decodable_frame_ids_, kMaxSize, kTargetSize);
+}
+
+bool LossNotificationController::AllDependenciesDecodable(
+ rtc::ArrayView<const int64_t> frame_dependencies) const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ // Due to packet reordering, frame buffering and asynchronous decoders, it is
+ // infeasible to make reliable conclusions on the decodability of a frame
+ // immediately when it arrives. We use the following assumptions:
+ // * Intra frames are decodable.
+ // * Inter frames are decodable if all of their references were decodable.
+  // One possibility that is ignored is that the packet may be corrupt.
+ for (int64_t ref_frame_id : frame_dependencies) {
+ const auto ref_frame_it = decodable_frame_ids_.find(ref_frame_id);
+ if (ref_frame_it == decodable_frame_ids_.end()) {
+ // Reference frame not decodable.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void LossNotificationController::HandleLoss(uint16_t last_received_seq_num,
+ bool decodability_flag) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ if (last_decodable_non_discardable_) {
+ RTC_DCHECK(AheadOf(last_received_seq_num,
+ last_decodable_non_discardable_->first_seq_num));
+ loss_notification_sender_->SendLossNotification(
+ last_decodable_non_discardable_->first_seq_num, last_received_seq_num,
+ decodability_flag, /*buffering_allowed=*/true);
+ } else {
+ key_frame_request_sender_->RequestKeyFrame();
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/loss_notification_controller.h b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.h
new file mode 100644
index 0000000000..ecba41267b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/loss_notification_controller.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_LOSS_NOTIFICATION_CONTROLLER_H_
+#define MODULES_VIDEO_CODING_LOSS_NOTIFICATION_CONTROLLER_H_
+
+#include <stdint.h>
+
+#include <set>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/sequence_checker.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class LossNotificationController {
+ public:
+ struct FrameDetails {
+ bool is_keyframe;
+ int64_t frame_id;
+ rtc::ArrayView<const int64_t> frame_dependencies;
+ };
+
+ LossNotificationController(KeyFrameRequestSender* key_frame_request_sender,
+ LossNotificationSender* loss_notification_sender);
+ ~LossNotificationController();
+
+ // An RTP packet was received from the network.
+ // `frame` is non-null iff the packet is the first packet in the frame.
+ void OnReceivedPacket(uint16_t rtp_seq_num, const FrameDetails* frame);
+
+ // A frame was assembled from packets previously received.
+ // (Should be called even if the frame was composed of a single packet.)
+ void OnAssembledFrame(uint16_t first_seq_num,
+ int64_t frame_id,
+ bool discardable,
+ rtc::ArrayView<const int64_t> frame_dependencies);
+
+ private:
+ void DiscardOldInformation();
+
+ bool AllDependenciesDecodable(
+ rtc::ArrayView<const int64_t> frame_dependencies) const;
+
+ // When the loss of a packet or the non-decodability of a frame is detected,
+ // produces a key frame request or a loss notification.
+ // 1. `last_received_seq_num` is the last received sequence number.
+ // 2. `decodability_flag` refers to the frame associated with the last packet.
+ // It is set to `true` if and only if all of that frame's dependencies are
+ // known to be decodable, and the frame itself is not yet known to be
+ // unassemblable (i.e. no earlier parts of it were lost).
+ // Clarifications:
+ // a. In a multi-packet frame, the first packet reveals the frame's
+ // dependencies, but it is not yet known whether all parts of the
+ // current frame will be received.
+ // b. In a multi-packet frame, if the first packet is missed, the
+ // dependencies are unknown, but it is known that the frame itself
+ // is unassemblable.
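+  // Example (assuming in-order arrival): packets 100-102 carry a fully
+  // decodable frame; packet 103 is lost; packet 104 then starts a new frame
+  // whose dependencies are all decodable. HandleLoss(104, true) is called,
+  // i.e. the decodability flag is true.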
+ void HandleLoss(uint16_t last_received_seq_num, bool decodability_flag);
+
+ KeyFrameRequestSender* const key_frame_request_sender_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ LossNotificationSender* const loss_notification_sender_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Tracked to avoid processing repeated frames (buggy/malicious remote).
+ absl::optional<int64_t> last_received_frame_id_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Tracked to avoid processing repeated packets.
+ absl::optional<uint16_t> last_received_seq_num_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Tracked in order to correctly report the potential-decodability of
+ // multi-packet frames.
+ bool current_frame_potentially_decodable_ RTC_GUARDED_BY(sequence_checker_);
+
+ // Loss notifications contain the sequence number of the first packet of
+ // the last decodable-and-non-discardable frame. Since this is a bit of
+ // a mouthful, last_decodable_non_discardable_.first_seq_num is used,
+ // which hopefully is a bit easier for human beings to parse
+ // than `first_seq_num_of_last_decodable_non_discardable_`.
+ struct FrameInfo {
+ explicit FrameInfo(uint16_t first_seq_num) : first_seq_num(first_seq_num) {}
+ uint16_t first_seq_num;
+ };
+ absl::optional<FrameInfo> last_decodable_non_discardable_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ // Track which frames are decodable. Later frames are also decodable if
+ // all of their dependencies can be found in this container.
+ // (Naturally, later frames must also be assemblable to be decodable.)
+ std::set<int64_t> decodable_frame_ids_ RTC_GUARDED_BY(sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_LOSS_NOTIFICATION_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc b/third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc
new file mode 100644
index 0000000000..9c4e715b4f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/loss_notification_controller_unittest.cc
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/loss_notification_controller.h"
+
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// The information about an RTP packet that is relevant in these tests.
+struct Packet {
+ uint16_t seq_num;
+ bool first_in_frame;
+ bool is_keyframe;
+ int64_t frame_id;
+ std::vector<int64_t> frame_dependencies;
+};
+
+Packet CreatePacket(
+ bool first_in_frame,
+ bool last_in_frame,
+ uint16_t seq_num,
+ uint16_t frame_id,
+ bool is_key_frame,
+ std::vector<int64_t> ref_frame_ids = std::vector<int64_t>()) {
+ Packet packet;
+ packet.seq_num = seq_num;
+ packet.first_in_frame = first_in_frame;
+ if (first_in_frame) {
+ packet.is_keyframe = is_key_frame;
+ packet.frame_id = frame_id;
+ RTC_DCHECK(!is_key_frame || ref_frame_ids.empty());
+ packet.frame_dependencies = std::move(ref_frame_ids);
+ }
+ return packet;
+}
+
+class PacketStreamCreator final {
+ public:
+ PacketStreamCreator() : seq_num_(0), frame_id_(0), next_is_key_frame_(true) {}
+
+ Packet NextPacket() {
+ std::vector<int64_t> ref_frame_ids;
+ if (!next_is_key_frame_) {
+ ref_frame_ids.push_back(frame_id_ - 1);
+ }
+
+ Packet packet = CreatePacket(true, true, seq_num_++, frame_id_++,
+ next_is_key_frame_, ref_frame_ids);
+
+ next_is_key_frame_ = false;
+
+ return packet;
+ }
+
+ private:
+ uint16_t seq_num_;
+ int64_t frame_id_;
+ bool next_is_key_frame_;
+};
+} // namespace
+
+// Most of the logic for the tests is here. Subclasses allow parameterizing
+// the test or adding some more specific logic.
+class LossNotificationControllerBaseTest : public ::testing::Test,
+ public KeyFrameRequestSender,
+ public LossNotificationSender {
+ protected:
+ LossNotificationControllerBaseTest()
+ : uut_(this, this), key_frame_requested_(false) {}
+
+ ~LossNotificationControllerBaseTest() override {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+ }
+
+ // KeyFrameRequestSender implementation.
+ void RequestKeyFrame() override {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+ key_frame_requested_ = true;
+ }
+
+ // LossNotificationSender implementation.
+ void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override {
+ EXPECT_TRUE(buffering_allowed); // (Flag useful elsewhere.)
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+ last_loss_notification_.emplace(last_decoded_seq_num, last_received_seq_num,
+ decodability_flag);
+ }
+
+ void OnReceivedPacket(const Packet& packet) {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+
+ if (packet.first_in_frame) {
+ previous_first_packet_in_frame_ = packet;
+ LossNotificationController::FrameDetails frame;
+ frame.is_keyframe = packet.is_keyframe;
+ frame.frame_id = packet.frame_id;
+ frame.frame_dependencies = packet.frame_dependencies;
+ uut_.OnReceivedPacket(packet.seq_num, &frame);
+ } else {
+ uut_.OnReceivedPacket(packet.seq_num, nullptr);
+ }
+ }
+
+ void OnAssembledFrame(uint16_t first_seq_num,
+ int64_t frame_id,
+ bool discardable) {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+
+ ASSERT_TRUE(previous_first_packet_in_frame_);
+ uut_.OnAssembledFrame(first_seq_num, frame_id, discardable,
+ previous_first_packet_in_frame_->frame_dependencies);
+ }
+
+ void ExpectKeyFrameRequest() {
+ EXPECT_EQ(LastLossNotification(), absl::nullopt);
+ EXPECT_TRUE(LastKeyFrameRequest());
+ }
+
+ void ExpectLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag) {
+ EXPECT_FALSE(LastKeyFrameRequest());
+ const auto last_ln = LastLossNotification();
+ ASSERT_TRUE(last_ln);
+ const LossNotification expected_ln(
+ last_decoded_seq_num, last_received_seq_num, decodability_flag);
+ EXPECT_EQ(expected_ln, *last_ln)
+ << "Expected loss notification (" << expected_ln.ToString()
+      << ") != received loss notification (" << last_ln->ToString() << ")";
+ }
+
+ struct LossNotification {
+ LossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag)
+ : last_decoded_seq_num(last_decoded_seq_num),
+ last_received_seq_num(last_received_seq_num),
+ decodability_flag(decodability_flag) {}
+
+ LossNotification& operator=(const LossNotification& other) = default;
+
+ bool operator==(const LossNotification& other) const {
+ return last_decoded_seq_num == other.last_decoded_seq_num &&
+ last_received_seq_num == other.last_received_seq_num &&
+ decodability_flag == other.decodability_flag;
+ }
+
+ std::string ToString() const {
+ return std::to_string(last_decoded_seq_num) + ", " +
+ std::to_string(last_received_seq_num) + ", " +
+ std::to_string(decodability_flag);
+ }
+
+ uint16_t last_decoded_seq_num;
+ uint16_t last_received_seq_num;
+ bool decodability_flag;
+ };
+
+ bool LastKeyFrameRequest() {
+ const bool result = key_frame_requested_;
+ key_frame_requested_ = false;
+ return result;
+ }
+
+ absl::optional<LossNotification> LastLossNotification() {
+ const absl::optional<LossNotification> result = last_loss_notification_;
+ last_loss_notification_ = absl::nullopt;
+ return result;
+ }
+
+ LossNotificationController uut_; // Unit under test.
+
+ bool key_frame_requested_;
+
+ absl::optional<LossNotification> last_loss_notification_;
+
+ // First packet of last frame. (Note that if a test skips the first packet
+ // of a subsequent frame, OnAssembledFrame is not called, and so this is
+  // not read. Therefore, it's not a problem if it is not cleared when
+ // the frame changes.)
+ absl::optional<Packet> previous_first_packet_in_frame_;
+};
+
+class LossNotificationControllerTest
+ : public LossNotificationControllerBaseTest,
+ public ::testing::WithParamInterface<std::tuple<bool, bool, bool>> {
+ protected:
+ // Arbitrary parameterized values, to be used by the tests whenever they
+  // wish either to check some combinations or to demonstrate that a
+  // particular arbitrary value is unimportant.
+ template <size_t N>
+ bool Bool() const {
+ return std::get<N>(GetParam());
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(_,
+ LossNotificationControllerTest,
+ ::testing::Combine(::testing::Bool(),
+ ::testing::Bool(),
+ ::testing::Bool()));
+
+// If the first frame, which is a key frame, is lost, then a new key frame
+// is requested.
+TEST_P(LossNotificationControllerTest,
+ PacketLossBeforeFirstFrameAssembledTriggersKeyFrameRequest) {
+ OnReceivedPacket(CreatePacket(true, false, 100, 0, true));
+ OnReceivedPacket(CreatePacket(Bool<0>(), Bool<1>(), 103, 1, false, {0}));
+ ExpectKeyFrameRequest();
+}
+
+// If a packet is lost after the first frame has been assembled, a loss
+// notification is issued rather than a key frame request.
+TEST_P(LossNotificationControllerTest,
+ PacketLossAfterFirstFrameAssembledTriggersLossNotification) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ const bool first = Bool<0>();
+ const bool last = Bool<1>();
+ OnReceivedPacket(CreatePacket(first, last, 103, 1, false, {0}));
+ const bool expected_decodability_flag = first;
+ ExpectLossNotification(100, 103, expected_decodability_flag);
+}
+
+// No key frame requests or loss notifications are issued for an innocuous
+// wrap-around of the sequence number.
+TEST_P(LossNotificationControllerTest, SeqNumWrapAround) {
+ uint16_t seq_num = std::numeric_limits<uint16_t>::max();
+ OnReceivedPacket(CreatePacket(true, true, seq_num, 0, true));
+ OnAssembledFrame(seq_num, 0, false);
+ const bool first = Bool<0>();
+ const bool last = Bool<1>();
+ OnReceivedPacket(CreatePacket(first, last, ++seq_num, 1, false, {0}));
+}
+
+TEST_F(LossNotificationControllerTest,
+ KeyFrameAfterPacketLossProducesNoLossNotifications) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 1, true));
+ OnAssembledFrame(100, 1, false);
+ OnReceivedPacket(CreatePacket(true, true, 108, 8, true));
+}
+
+TEST_P(LossNotificationControllerTest, LostReferenceProducesLossNotification) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ uint16_t last_decodable_non_discardable_seq_num = 100;
+
+ // RTP gap produces loss notification - not the focus of this test.
+ const bool first = Bool<0>();
+ const bool last = Bool<1>();
+ const bool discardable = Bool<2>();
+ const bool decodable = first; // Depends on assemblability.
+ OnReceivedPacket(CreatePacket(first, last, 107, 3, false, {0}));
+ ExpectLossNotification(100, 107, decodable);
+ OnAssembledFrame(107, 3, discardable);
+ if (!discardable) {
+ last_decodable_non_discardable_seq_num = 107;
+ }
+
+ // Test focus - a loss notification is produced because of the missing
+ // dependency (frame ID 2), despite the RTP sequence number being the
+ // next expected one.
+ OnReceivedPacket(CreatePacket(true, true, 108, 4, false, {2, 0}));
+ ExpectLossNotification(last_decodable_non_discardable_seq_num, 108, false);
+}
+
+// The difference between this test and the previous one is that in this test,
+// although the reference frame was received, it was not decodable.
+TEST_P(LossNotificationControllerTest,
+ UndecodableReferenceProducesLossNotification) {
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ uint16_t last_decodable_non_discardable_seq_num = 100;
+
+ // RTP gap produces loss notification - not the focus of this test.
+ // Also, not decodable; this is important for later in the test.
+ OnReceivedPacket(CreatePacket(true, true, 107, 3, false, {2}));
+ ExpectLossNotification(100, 107, false);
+ const bool discardable = Bool<0>();
+ OnAssembledFrame(107, 3, discardable);
+
+ // Test focus - a loss notification is produced because of the undecodable
+ // dependency (frame ID 3, which depended on the missing frame ID 2).
+ OnReceivedPacket(CreatePacket(true, true, 108, 4, false, {3, 0}));
+ ExpectLossNotification(last_decodable_non_discardable_seq_num, 108, false);
+}
+
+TEST_P(LossNotificationControllerTest, RobustnessAgainstHighInitialRefFrameId) {
+ constexpr uint16_t max_uint16_t = std::numeric_limits<uint16_t>::max();
+ OnReceivedPacket(CreatePacket(true, true, 100, 0, true));
+ OnAssembledFrame(100, 0, false);
+ OnReceivedPacket(CreatePacket(true, true, 101, 1, false, {max_uint16_t}));
+ ExpectLossNotification(100, 101, false);
+ OnAssembledFrame(101, max_uint16_t, Bool<0>());
+}
+
+TEST_P(LossNotificationControllerTest, RepeatedPacketsAreIgnored) {
+ PacketStreamCreator packet_stream;
+
+ const auto key_frame_packet = packet_stream.NextPacket();
+ OnReceivedPacket(key_frame_packet);
+ OnAssembledFrame(key_frame_packet.seq_num, key_frame_packet.frame_id, false);
+
+ const bool gap = Bool<0>();
+
+ if (gap) {
+ // Lose one packet.
+ packet_stream.NextPacket();
+ }
+
+ auto repeated_packet = packet_stream.NextPacket();
+ OnReceivedPacket(repeated_packet);
+ if (gap) {
+ // Loss notification issued because of the gap. This is not the focus of
+ // the test.
+ ExpectLossNotification(key_frame_packet.seq_num, repeated_packet.seq_num,
+ false);
+ }
+ OnReceivedPacket(repeated_packet);
+}
+
+TEST_F(LossNotificationControllerTest,
+ RecognizesDependencyAcrossIntraFrameThatIsNotAKeyframe) {
+ int last_seq_num = 1;
+ auto receive = [&](bool is_key_frame, int64_t frame_id,
+ std::vector<int64_t> ref_frame_ids) {
+ ++last_seq_num;
+ OnReceivedPacket(CreatePacket(
+ /*first_in_frame=*/true, /*last_in_frame=*/true, last_seq_num, frame_id,
+ is_key_frame, std::move(ref_frame_ids)));
+ OnAssembledFrame(last_seq_num, frame_id, /*discardable=*/false);
+ };
+ // 11 -- 13
+ // | |
+ // 10 12
+ receive(/*is_key_frame=*/true, /*frame_id=*/10, /*ref_frame_ids=*/{});
+ receive(/*is_key_frame=*/false, /*frame_id=*/11, /*ref_frame_ids=*/{10});
+ receive(/*is_key_frame=*/false, /*frame_id=*/12, /*ref_frame_ids=*/{});
+ receive(/*is_key_frame=*/false, /*frame_id=*/13, /*ref_frame_ids=*/{11, 12});
+ EXPECT_FALSE(LastLossNotification());
+}
+
+class LossNotificationControllerTestDecodabilityFlag
+ : public LossNotificationControllerBaseTest {
+ protected:
+ LossNotificationControllerTestDecodabilityFlag()
+ : key_frame_seq_num_(100),
+ key_frame_frame_id_(0),
+ never_received_frame_id_(key_frame_frame_id_ + 1),
+ seq_num_(0),
+ frame_id_(0) {}
+
+ void ReceiveKeyFrame() {
+ RTC_DCHECK_NE(key_frame_frame_id_, never_received_frame_id_);
+ OnReceivedPacket(CreatePacket(true, true, key_frame_seq_num_,
+ key_frame_frame_id_, true));
+ OnAssembledFrame(key_frame_seq_num_, key_frame_frame_id_, false);
+ seq_num_ = key_frame_seq_num_;
+ frame_id_ = key_frame_frame_id_;
+ }
+
+ void ReceivePacket(bool first_packet_in_frame,
+ bool last_packet_in_frame,
+ const std::vector<int64_t>& ref_frame_ids) {
+ if (first_packet_in_frame) {
+ frame_id_ += 1;
+ }
+ RTC_DCHECK_NE(frame_id_, never_received_frame_id_);
+ constexpr bool is_key_frame = false;
+ OnReceivedPacket(CreatePacket(first_packet_in_frame, last_packet_in_frame,
+ ++seq_num_, frame_id_, is_key_frame,
+ ref_frame_ids));
+ }
+
+ void CreateGap() {
+ seq_num_ += 50;
+ frame_id_ += 10;
+ }
+
+ const uint16_t key_frame_seq_num_;
+ const uint16_t key_frame_frame_id_;
+
+ // The tests intentionally never receive this, and can therefore always
+ // use this as an unsatisfied dependency.
+ const int64_t never_received_frame_id_ = 123;
+
+ uint16_t seq_num_;
+ int64_t frame_id_;
+};
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ SinglePacketFrameWithDecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ SinglePacketFrameWithUndecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ FirstPacketOfMultiPacketFrameWithDecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ FirstPacketOfMultiPacketFrameWithUndecodableDependencies) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithDecodableDependenciesIfFirstMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(false, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithUndecodableDependenciesIfFirstMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(false, false, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithDecodableDependenciesIfFirstReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers.
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Middle packet in multi-packet frame. No additional gap and the frame is
+ // still potentially decodable, so no additional loss indication.
+ ReceivePacket(false, false, ref_frame_ids);
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+}
+
+TEST_F(
+ LossNotificationControllerTestDecodabilityFlag,
+ MiddlePacketOfMultiPacketFrameWithUndecodableDependenciesIfFirstReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers. The frame is also recognized
+ // as having non-decodable dependencies.
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Middle packet in multi-packet frame. No additional gap, but the frame is
+ // known to be non-decodable, so we keep issuing loss indications.
+ ReceivePacket(false, false, ref_frame_ids);
+ const bool expected_decodability_flag_middle = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_middle);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithDecodableDependenciesIfAllPrevMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(false, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithUndecodableDependenciesIfAllPrevMissed) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(false, true, ref_frame_ids);
+
+ const bool expected_decodability_flag = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag);
+}
+
+TEST_F(LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithDecodableDependenciesIfAllPrevReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers.
+ const std::vector<int64_t> ref_frame_ids = {key_frame_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = true;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Last packet in multi-packet frame. No additional gap and the frame is
+ // still potentially decodable, so no additional loss indication.
+ ReceivePacket(false, true, ref_frame_ids);
+ EXPECT_FALSE(LastKeyFrameRequest());
+ EXPECT_FALSE(LastLossNotification());
+}
+
+TEST_F(
+ LossNotificationControllerTestDecodabilityFlag,
+ LastPacketOfMultiPacketFrameWithUndecodableDependenciesIfAllPrevReceived) {
+ ReceiveKeyFrame();
+ CreateGap();
+
+ // First packet in multi-packet frame. A loss notification is produced
+ // because of the gap in RTP sequence numbers. The frame is also recognized
+ // as having non-decodable dependencies.
+ const std::vector<int64_t> ref_frame_ids = {never_received_frame_id_};
+ ReceivePacket(true, false, ref_frame_ids);
+ const bool expected_decodability_flag_first = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_first);
+
+ // Last packet in multi-packet frame. No additional gap, but the frame is
+ // known to be non-decodable, so we keep issuing loss indications.
+ ReceivePacket(false, true, ref_frame_ids);
+ const bool expected_decodability_flag_last = false;
+ ExpectLossNotification(key_frame_seq_num_, seq_num_,
+ expected_decodability_flag_last);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/media_opt_util.cc b/third_party/libwebrtc/modules/video_coding/media_opt_util.cc
new file mode 100644
index 0000000000..7580c95fc7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/media_opt_util.cc
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/media_opt_util.h"
+
+#include <math.h>
+
+#include <algorithm>
+
+#include "modules/video_coding/fec_rate_table.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+// Max value of loss rates in off-line model
+static const int kPacketLossMax = 129;
+
+namespace media_optimization {
+
+VCMProtectionParameters::VCMProtectionParameters()
+ : rtt(0),
+ lossPr(0.0f),
+ bitRate(0.0f),
+ packetsPerFrame(0.0f),
+ packetsPerFrameKey(0.0f),
+ frameRate(0.0f),
+ keyFrameSize(0.0f),
+ fecRateDelta(0),
+ fecRateKey(0),
+ codecWidth(0),
+ codecHeight(0),
+ numLayers(1) {}
+
+VCMProtectionMethod::VCMProtectionMethod()
+ : _effectivePacketLoss(0),
+ _protectionFactorK(0),
+ _protectionFactorD(0),
+ _scaleProtKey(2.0f),
+ _maxPayloadSize(1460),
+ _corrFecCost(1.0),
+ _type(kNone) {}
+
+VCMProtectionMethod::~VCMProtectionMethod() {}
+
+enum VCMProtectionMethodEnum VCMProtectionMethod::Type() const {
+ return _type;
+}
+
+uint8_t VCMProtectionMethod::RequiredPacketLossER() {
+ return _effectivePacketLoss;
+}
+
+uint8_t VCMProtectionMethod::RequiredProtectionFactorK() {
+ return _protectionFactorK;
+}
+
+uint8_t VCMProtectionMethod::RequiredProtectionFactorD() {
+ return _protectionFactorD;
+}
+
+bool VCMProtectionMethod::RequiredUepProtectionK() {
+ return _useUepProtectionK;
+}
+
+bool VCMProtectionMethod::RequiredUepProtectionD() {
+ return _useUepProtectionD;
+}
+
+int VCMProtectionMethod::MaxFramesFec() const {
+ return 1;
+}
+
+VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs)
+ : VCMFecMethod(),
+ _lowRttNackMs(lowRttNackThresholdMs),
+ _highRttNackMs(highRttNackThresholdMs),
+ _maxFramesFec(1) {
+ RTC_DCHECK(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
+ RTC_DCHECK(highRttNackThresholdMs == -1 ||
+ lowRttNackThresholdMs <= highRttNackThresholdMs);
+ RTC_DCHECK(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
+ _type = kNackFec;
+}
+
+VCMNackFecMethod::~VCMNackFecMethod() {
+ //
+}
+bool VCMNackFecMethod::ProtectionFactor(
+ const VCMProtectionParameters* parameters) {
+ // Hybrid Nack FEC has three operational modes:
+  //  1. Low RTT (below _lowRttNackMs) - Nack only: Set FEC rate
+ // (_protectionFactorD) to zero. -1 means no FEC.
+ // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
+ // -1 means always allow NACK.
+ // 3. Medium RTT values - Hybrid mode: We will only nack the
+ // residual following the decoding of the FEC (refer to JB logic). FEC
+ // delta protection factor will be adjusted based on the RTT.
+
+ // Otherwise: we count on FEC; if the RTT is below a threshold, then we
+ // nack the residual, based on a decision made in the JB.
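+  // E.g., with thresholds (20 ms, 100 ms): RTT = 10 ms -> mode 1 (NACK only),
+  // RTT = 50 ms -> mode 3 (hybrid), RTT = 150 ms -> mode 2 (FEC only).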
+
+ // Compute the protection factors
+ VCMFecMethod::ProtectionFactor(parameters);
+ if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs) {
+ _protectionFactorD = 0;
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+
+ // When in Hybrid mode (RTT range), adjust FEC rates based on the
+ // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
+ } else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs) {
+ // TODO(mikhal): Disabling adjustment temporarily.
+ // uint16_t rttIndex = (uint16_t) parameters->rtt;
+ float adjustRtt = 1.0f; // (float)VCMNackFecTable[rttIndex] / 100.0f;
+
+ // Adjust FEC with NACK on (for delta frame only)
+ // table depends on RTT relative to rttMax (NACK Threshold)
+ _protectionFactorD = rtc::saturated_cast<uint8_t>(
+ adjustRtt * rtc::saturated_cast<float>(_protectionFactorD));
+ // update FEC rates after applying adjustment
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+ }
+
+ return true;
+}
+
+int VCMNackFecMethod::ComputeMaxFramesFec(
+ const VCMProtectionParameters* parameters) {
+ if (parameters->numLayers > 2) {
+    // For more than 2 temporal layers we will only have FEC on the base
+    // layer, and base-layer frames will be far apart in time. Therefore we
+    // force single-frame FEC.
+ return 1;
+ }
+ // We set the max number of frames to base the FEC on so that on average
+ // we will have complete frames in one RTT. Note that this is an upper
+ // bound, and that the actual number of frames used for FEC is decided by the
+ // RTP module based on the actual number of packets and the protection factor.
+ float base_layer_framerate =
+ parameters->frameRate /
+ rtc::saturated_cast<float>(1 << (parameters->numLayers - 1));
+ int max_frames_fec = std::max(
+ rtc::saturated_cast<int>(
+ 2.0f * base_layer_framerate * parameters->rtt / 1000.0f + 0.5f),
+ 1);
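+  // E.g., at 30 fps with two temporal layers (base layer at 15 fps) and an
+  // RTT of 100 ms: 2.0 * 15 * 0.1 + 0.5 = 3.5, truncated to 3 frames of FEC.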
+ // `kUpperLimitFramesFec` is the upper limit on how many frames we
+ // allow any FEC to be based on.
+ if (max_frames_fec > kUpperLimitFramesFec) {
+ max_frames_fec = kUpperLimitFramesFec;
+ }
+ return max_frames_fec;
+}
+
+int VCMNackFecMethod::MaxFramesFec() const {
+ return _maxFramesFec;
+}
+
+bool VCMNackFecMethod::BitRateTooLowForFec(
+ const VCMProtectionParameters* parameters) {
+ // Bitrate below which we turn off FEC, regardless of reported packet loss.
+ // The condition should depend on resolution and content. For now, use
+  // The condition should depend on resolution and content. For now, use a
+  // threshold on bytes per frame, with some adjustment for the frame size.
+ // such as `_numLayers`, `_maxFramesFec`, and `_rtt`.
+ int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
+ int max_bytes_per_frame = kMaxBytesPerFrameForFec;
+ int num_pixels = parameters->codecWidth * parameters->codecHeight;
+ if (num_pixels <= 352 * 288) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
+ } else if (num_pixels > 640 * 480) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
+ }
+ // TODO(marpan): add condition based on maximum frames used for FEC,
+ // and expand condition based on frame size.
+ // Max round trip time threshold in ms.
+ const int64_t kMaxRttTurnOffFec = 200;
+ if (estimate_bytes_per_frame < max_bytes_per_frame &&
+ parameters->numLayers < 3 && parameters->rtt < kMaxRttTurnOffFec) {
+ return true;
+ }
+ return false;
+}
+
+bool VCMNackFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+ // Set the effective packet loss for encoder (based on FEC code).
+ // Compute the effective packet loss and residual packet loss due to FEC.
+ VCMFecMethod::EffectivePacketLoss(parameters);
+ return true;
+}
+
+bool VCMNackFecMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ ProtectionFactor(parameters);
+ EffectivePacketLoss(parameters);
+ _maxFramesFec = ComputeMaxFramesFec(parameters);
+ if (BitRateTooLowForFec(parameters)) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ }
+
+  // Protection/FEC rates obtained above are defined relative to the total
+  // number of packets (total rate: source + FEC). The FEC in the RTP module
+  // assumes the protection factor is defined relative to the number of
+  // source packets, so we convert the factor here to reduce the mismatch
+  // between mediaOpt's rate and the actual one.
+ _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMNackMethod::VCMNackMethod() : VCMProtectionMethod() {
+ _type = kNack;
+}
+
+VCMNackMethod::~VCMNackMethod() {
+ //
+}
+
+bool VCMNackMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameter) {
+  // Effective packet loss: not applicable in the current version.
+ _effectivePacketLoss = 0;
+ return true;
+}
+
+bool VCMNackMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+ // nackCost = (bitRate - nackCost) * (lossPr)
+ return true;
+}
+
+VCMFecMethod::VCMFecMethod()
+ : VCMProtectionMethod(),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()) {
+ _type = kFec;
+}
+
+VCMFecMethod::~VCMFecMethod() = default;
+
+uint8_t VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const {
+ uint8_t boostRateKey = 2;
+ // Default: ratio scales the FEC protection up for I frames
+ uint8_t ratio = 1;
+
+ if (packetFrameDelta > 0) {
+ ratio = (int8_t)(packetFrameKey / packetFrameDelta);
+ }
+ ratio = VCM_MAX(boostRateKey, ratio);
+
+ return ratio;
+}
+
+uint8_t VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const {
+ return rtc::saturated_cast<uint8_t>(
+ VCM_MIN(255, (0.5 + 255.0 * codeRateRTP /
+ rtc::saturated_cast<float>(255 - codeRateRTP))));
+}
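+
+// The conversion above maps a protection factor defined relative to the
+// total packet count (source + FEC) to one defined relative to the source
+// packet count: out = 255 * in / (255 - in), rounded. As a hypothetical
+// example, in = 64 (~25% of all packets being FEC) converts to
+// 255 * 64 / 191 ~= 85, i.e. ~33% overhead relative to the source packets.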
+
+// Update FEC with protectionFactorD
+void VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD) {
+ _protectionFactorD = protectionFactorD;
+}
+
+// Update FEC with protectionFactorK
+void VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK) {
+ _protectionFactorK = protectionFactorK;
+}
+
+bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
+ // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
+
+ // No protection if (filtered) packetLoss is 0
+ uint8_t packetLoss = rtc::saturated_cast<uint8_t>(255 * parameters->lossPr);
+ if (packetLoss == 0) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ return true;
+ }
+
+  // Parameters for the FEC setting:
+  // first partition size, thresholds, table parameters, spatial resolution
+  // factor.
+
+ // First partition protection: ~ 20%
+ uint8_t firstPartitionProt = rtc::saturated_cast<uint8_t>(255 * 0.20);
+
+ // Minimum protection level needed to generate one FEC packet for one
+ // source packet/frame (in RTP sender)
+ uint8_t minProtLevelFec = 85;
+
+  // Thresholds on packetLoss and bitRate/frameRate (= average #packets),
+  // above which we allocate protection to cover at least the first partition.
+ uint8_t lossThr = 0;
+ uint8_t packetNumThr = 1;
+
+ // Parameters for range of rate index of table.
+ const uint8_t ratePar1 = 5;
+ const uint8_t ratePar2 = 49;
+
+ // Spatial resolution size, relative to a reference size.
+ float spatialSizeToRef = rtc::saturated_cast<float>(parameters->codecWidth *
+ parameters->codecHeight) /
+ (rtc::saturated_cast<float>(704 * 576));
+  // resolnFac: This parameter will generally increase/decrease the FEC rate
+  // (for fixed bitRate and packetLoss) based on the spatial resolution.
+  // Use a smaller exponent (< 1) to soften the resolution effect.
+ const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
+
+ const int bitRatePerFrame = BitsPerFrame(parameters);
+
+ // Average number of packets per frame (source and fec):
+ const uint8_t avgTotPackets = rtc::saturated_cast<uint8_t>(
+ 1.5f + rtc::saturated_cast<float>(bitRatePerFrame) * 1000.0f /
+ rtc::saturated_cast<float>(8.0 * _maxPayloadSize));
+
+ // FEC rate parameters: for P and I frame
+ uint8_t codeRateDelta = 0;
+ uint8_t codeRateKey = 0;
+
+ // Get index for table: the FEC protection depends on an effective rate.
+ // The range on the rate index corresponds to rates (bps)
+ // from ~200k to ~8000k, for 30fps
+ const uint16_t effRateFecTable =
+ rtc::saturated_cast<uint16_t>(resolnFac * bitRatePerFrame);
+ uint8_t rateIndexTable = rtc::saturated_cast<uint8_t>(
+ VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0));
+
+ // Restrict packet loss range to 50:
+ // current tables defined only up to 50%
+ if (packetLoss >= kPacketLossMax) {
+ packetLoss = kPacketLossMax - 1;
+ }
+ uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
+
+ // Check on table index
+ RTC_DCHECK_LT(indexTable, kFecRateTableSize);
+
+ // Protection factor for P frame
+ codeRateDelta = kFecRateTable[indexTable];
+
+ if (packetLoss > lossThr && avgTotPackets > packetNumThr) {
+ // Set a minimum based on first partition size.
+ if (codeRateDelta < firstPartitionProt) {
+ codeRateDelta = firstPartitionProt;
+ }
+ }
+
+ // Check limit on amount of protection for P frame; 50% is max.
+ if (codeRateDelta >= kPacketLossMax) {
+ codeRateDelta = kPacketLossMax - 1;
+ }
+
+  // For the key frame:
+  // Effectively at a higher rate, so we scale/boost the rate.
+  // The boost factor may depend on several factors: the ratio of packet
+  // counts for I vs. P frames, how much protection is placed on P frames,
+  // etc.
+ const uint8_t packetFrameDelta =
+ rtc::saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrame);
+ const uint8_t packetFrameKey =
+ rtc::saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrameKey);
+ const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
+
+ rateIndexTable = rtc::saturated_cast<uint8_t>(VCM_MAX(
+ VCM_MIN(1 + (boostKey * effRateFecTable - ratePar1) / ratePar1, ratePar2),
+ 0));
+ uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
+
+  indexTableKey = VCM_MIN(indexTableKey, kFecRateTableSize - 1);
+
+ // Check on table index
+ RTC_DCHECK_LT(indexTableKey, kFecRateTableSize);
+
+ // Protection factor for I frame
+ codeRateKey = kFecRateTable[indexTableKey];
+
+ // Boosting for Key frame.
+ int boostKeyProt = _scaleProtKey * codeRateDelta;
+ if (boostKeyProt >= kPacketLossMax) {
+ boostKeyProt = kPacketLossMax - 1;
+ }
+
+ // Make sure I frame protection is at least larger than P frame protection,
+ // and at least as high as filtered packet loss.
+ codeRateKey = rtc::saturated_cast<uint8_t>(
+ VCM_MAX(packetLoss, VCM_MAX(boostKeyProt, codeRateKey)));
+
+ // Check limit on amount of protection for I frame: 50% is max.
+ if (codeRateKey >= kPacketLossMax) {
+ codeRateKey = kPacketLossMax - 1;
+ }
+
+ _protectionFactorK = codeRateKey;
+ _protectionFactorD = codeRateDelta;
+
+  // Generally there is a rate mismatch between the FEC cost estimated in
+  // mediaOpt and the actual FEC cost sent out in the RTP module. This is
+  // more significant at low rates (a small number of source packets), where
+  // the granularity of the FEC decreases. In this case, non-zero protection
+  // in mediaOpt may generate 0 FEC packets in the RTP sender (since the
+  // actual number of FEC packets is based on rounding off protectionFactor
+  // on the actual source packet number).
+  // The correction factor (_corrFecCost) attempts to correct this, at least
+  // for cases of low rates (small #packets) and low protection levels.
+
+ float numPacketsFl =
+ 1.0f + (rtc::saturated_cast<float>(bitRatePerFrame) * 1000.0 /
+ rtc::saturated_cast<float>(8.0 * _maxPayloadSize) +
+ 0.5);
+
+ const float estNumFecGen =
+ 0.5f +
+ rtc::saturated_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
+
+  // We reduce the cost factor (which reduces the overhead for the FEC and
+  // hybrid methods), not the protectionFactor.
+ _corrFecCost = 1.0f;
+ if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.5f;
+ }
+ if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.0f;
+ }
+
+ // DONE WITH FEC PROTECTION SETTINGS
+ return true;
+}
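+
+// A hypothetical walk-through of the table lookup above: at 704x576
+// (resolnFac = 1), 1200 kbps at 30 fps gives bitRatePerFrame = 40, so
+// effRateFecTable = 40 and rateIndexTable = (40 - 5) / 5 = 7. With 10%
+// reported loss, packetLoss = 25 and the delta-frame protection factor is
+// read from kFecRateTable[7 * kPacketLossMax + 25].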
+
+int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
+  // When temporal layers are available, FEC is only applied to the base
+  // layer.
+ const float bitRateRatio =
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ parameters->numLayers, 0,
+ rate_control_settings_.Vp8BaseHeavyTl3RateAllocation());
+ float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
+ float bitRate = parameters->bitRate * bitRateRatio;
+ float frameRate = parameters->frameRate * frameRateRatio;
+
+ // TODO(mikhal): Update factor following testing.
+ float adjustmentFactor = 1;
+
+ if (frameRate < 1.0f)
+ frameRate = 1.0f;
+ // Average bits per frame (units of kbits)
+ return rtc::saturated_cast<int>(adjustmentFactor * bitRate / frameRate);
+}
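+
+// A hypothetical example of the temporal-layer adjustment above: with two
+// temporal layers at 1000 kbps and 30 fps, the base layer is assumed to
+// carry a fixed fraction of the total bitrate (0.6 under the default VP8
+// temporal allocation) at half the frame rate, so the FEC sizing sees
+// 600 kbps / 15 fps = 40 kbits per protected frame.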
+
+bool VCMFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+  // The effective packet loss to the encoder is based on the RPL (residual
+  // packet loss); this is a soft setting based on the degree of FEC
+  // protection: RPL = received/input packet loss - average_FEC_recovery.
+  // Note: the received/input packet loss may be filtered based on
+  // FilteredLoss.
+
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+
+ return true;
+}
+
+bool VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters) {
+ // Compute the protection factor
+ ProtectionFactor(parameters);
+
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+  // The protection/FEC rates obtained above are defined relative to the
+  // total number of packets (total rate: source + FEC). FEC in the RTP
+  // module assumes the protection factor is defined relative to the number
+  // of source packets, so we convert the factor to reduce the mismatch
+  // between mediaOpt's suggested rate and the actual rate.
+ _protectionFactorK = ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs)
+ : _currentParameters(),
+ _rtt(0),
+ _lossPr(0.0f),
+ _bitRate(0.0f),
+ _frameRate(0.0f),
+ _keyFrameSize(0.0f),
+ _fecRateKey(0),
+ _fecRateDelta(0),
+ _lastPrUpdateT(0),
+ _lossPr255(0.9999f),
+ _lossPrHistory(),
+ _shortMaxLossPr255(0),
+ _packetsPerFrame(0.9999f),
+ _packetsPerFrameKey(0.9999f),
+ _codecWidth(704),
+ _codecHeight(576),
+ _numLayers(1) {
+ Reset(nowMs);
+}
+
+VCMLossProtectionLogic::~VCMLossProtectionLogic() {
+ Release();
+}
+
+void VCMLossProtectionLogic::SetMethod(
+ enum VCMProtectionMethodEnum newMethodType) {
+ if (_selectedMethod && _selectedMethod->Type() == newMethodType)
+ return;
+
+ switch (newMethodType) {
+ case kNack:
+ _selectedMethod.reset(new VCMNackMethod());
+ break;
+ case kFec:
+ _selectedMethod.reset(new VCMFecMethod());
+ break;
+ case kNackFec:
+ _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
+ break;
+ case kNone:
+ _selectedMethod.reset();
+ break;
+ }
+ UpdateMethod();
+}
+
+void VCMLossProtectionLogic::UpdateRtt(int64_t rtt) {
+ _rtt = rtt;
+}
+
+void VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
+ int64_t now) {
+ if (_lossPrHistory[0].timeMs >= 0 &&
+ now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs) {
+ if (lossPr255 > _shortMaxLossPr255) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+ } else {
+ // Only add a new value to the history once a second
+ if (_lossPrHistory[0].timeMs == -1) {
+ // First, no shift
+ _shortMaxLossPr255 = lossPr255;
+ } else {
+ // Shift
+ for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--) {
+ _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
+ _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
+ }
+ }
+ if (_shortMaxLossPr255 == 0) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+
+ _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
+ _lossPrHistory[0].timeMs = now;
+ _shortMaxLossPr255 = 0;
+ }
+}
+
+uint8_t VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const {
+ uint8_t maxFound = _shortMaxLossPr255;
+ if (_lossPrHistory[0].timeMs == -1) {
+ return maxFound;
+ }
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ if (_lossPrHistory[i].timeMs == -1) {
+ break;
+ }
+ if (nowMs - _lossPrHistory[i].timeMs >
+ kLossPrHistorySize * kLossPrShortFilterWinMs) {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_lossPrHistory[i].lossPr255 > maxFound) {
+      // This sample is the largest one so far in the history.
+ maxFound = _lossPrHistory[i].lossPr255;
+ }
+ }
+ return maxFound;
+}
+
+uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255) {
+ // Update the max window filter.
+ UpdateMaxLossHistory(lossPr255, nowMs);
+
+ // Update the recursive average filter.
+ _lossPr255.Apply(rtc::saturated_cast<float>(nowMs - _lastPrUpdateT),
+ rtc::saturated_cast<float>(lossPr255));
+ _lastPrUpdateT = nowMs;
+
+ // Filtered loss: default is received loss (no filtering).
+ uint8_t filtered_loss = lossPr255;
+
+ switch (filter_mode) {
+ case kNoFilter:
+ break;
+ case kAvgFilter:
+ filtered_loss = rtc::saturated_cast<uint8_t>(_lossPr255.filtered() + 0.5);
+ break;
+ case kMaxFilter:
+ filtered_loss = MaxFilteredLossPr(nowMs);
+ break;
+ }
+
+ return filtered_loss;
+}
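+
+// As a hypothetical illustration of the three modes above: if the loss
+// samples over the last few seconds were 10, 40 and 20 (out of 255),
+// kNoFilter returns the latest value (20), kAvgFilter returns the
+// exponentially smoothed average, and kMaxFilter returns 40, the largest
+// sample within the (kLossPrHistorySize * kLossPrShortFilterWinMs) ms
+// window.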
+
+void VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc) {
+ _lossPr = rtc::saturated_cast<float>(packetLossEnc) / 255.0;
+}
+
+void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
+ _bitRate = bitRate;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrame.Apply(
+ rtc::saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateT), nPackets);
+ _lastPacketPerFrameUpdateT = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrameKey.Apply(
+ rtc::saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey),
+ nPackets);
+ _lastPacketPerFrameUpdateTKey = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize) {
+ _keyFrameSize = keyFrameSize;
+}
+
+void VCMLossProtectionLogic::UpdateFrameSize(size_t width, size_t height) {
+ _codecWidth = width;
+ _codecHeight = height;
+}
+
+void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
+ _numLayers = (numLayers == 0) ? 1 : numLayers;
+}
+
+bool VCMLossProtectionLogic::UpdateMethod() {
+ if (!_selectedMethod)
+ return false;
+ _currentParameters.rtt = _rtt;
+ _currentParameters.lossPr = _lossPr;
+ _currentParameters.bitRate = _bitRate;
+  _currentParameters.frameRate = _frameRate;
+ _currentParameters.keyFrameSize = _keyFrameSize;
+ _currentParameters.fecRateDelta = _fecRateDelta;
+ _currentParameters.fecRateKey = _fecRateKey;
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
+ _currentParameters.codecWidth = _codecWidth;
+ _currentParameters.codecHeight = _codecHeight;
+ _currentParameters.numLayers = _numLayers;
+ return _selectedMethod->UpdateParameters(&_currentParameters);
+}
+
+VCMProtectionMethod* VCMLossProtectionLogic::SelectedMethod() const {
+ return _selectedMethod.get();
+}
+
+VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
+ return _selectedMethod ? _selectedMethod->Type() : kNone;
+}
+
+void VCMLossProtectionLogic::Reset(int64_t nowMs) {
+ _lastPrUpdateT = nowMs;
+ _lastPacketPerFrameUpdateT = nowMs;
+ _lastPacketPerFrameUpdateTKey = nowMs;
+ _lossPr255.Reset(0.9999f);
+ _packetsPerFrame.Reset(0.9999f);
+ _fecRateDelta = _fecRateKey = 0;
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ _lossPrHistory[i].lossPr255 = 0;
+ _lossPrHistory[i].timeMs = -1;
+ }
+ _shortMaxLossPr255 = 0;
+ Release();
+}
+
+void VCMLossProtectionLogic::Release() {
+ _selectedMethod.reset();
+}
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/media_opt_util.h b/third_party/libwebrtc/modules/video_coding/media_opt_util.h
new file mode 100644
index 0000000000..a74d1af6cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/media_opt_util.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+#define MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+#include <memory>
+
+#include "modules/video_coding/internal_defines.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/numerics/exp_filter.h"
+
+namespace webrtc {
+namespace media_optimization {
+
+// Number of time periods used for (max) window filter for packet loss
+// TODO(marpan): set reasonable window size for filtered packet loss,
+// adjustment should be based on logged/real data of loss stats/correlation.
+constexpr int kLossPrHistorySize = 10;
+
+// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
+constexpr int kLossPrShortFilterWinMs = 1000;
+
+// The type of filter used on the received packet loss reports.
+enum FilterPacketLossMode {
+ kNoFilter, // No filtering on received loss.
+ kAvgFilter, // Recursive average filter.
+ kMaxFilter // Max-window filter, over the time interval of:
+ // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
+};
+
+// Thresholds for hybrid NACK/FEC
+// common to media optimization and the jitter buffer.
+constexpr int64_t kLowRttNackMs = 20;
+
+// If the RTT is higher than this, an extra RTT won't be added to the jitter
+// buffer delay.
+constexpr int kMaxRttDelayThreshold = 500;
+
+struct VCMProtectionParameters {
+ VCMProtectionParameters();
+
+ int64_t rtt;
+ float lossPr;
+ float bitRate;
+ float packetsPerFrame;
+ float packetsPerFrameKey;
+ float frameRate;
+ float keyFrameSize;
+ uint8_t fecRateDelta;
+ uint8_t fecRateKey;
+ uint16_t codecWidth;
+ uint16_t codecHeight;
+ int numLayers;
+};
+
+/******************************/
+/* VCMProtectionMethod class */
+/******************************/
+
+enum VCMProtectionMethodEnum { kNack, kFec, kNackFec, kNone };
+
+class VCMLossProbabilitySample {
+ public:
+ VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}
+
+ uint8_t lossPr255;
+ int64_t timeMs;
+};
+
+class VCMProtectionMethod {
+ public:
+ VCMProtectionMethod();
+ virtual ~VCMProtectionMethod();
+
+ // Updates the efficiency of the method using the parameters provided
+ //
+ // Input:
+ // - parameters : Parameters used to calculate efficiency
+ //
+ // Return value : True if this method is recommended in
+ // the given conditions.
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
+
+ // Returns the protection type
+ //
+ // Return value : The protection type
+ VCMProtectionMethodEnum Type() const;
+
+ // Returns the effective packet loss for ER, required by this protection
+ // method
+ //
+ // Return value : Required effective packet loss
+ virtual uint8_t RequiredPacketLossER();
+
+ // Extracts the FEC protection factor for Key frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for Key frame
+ virtual uint8_t RequiredProtectionFactorK();
+
+ // Extracts the FEC protection factor for Delta frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for delta frame
+ virtual uint8_t RequiredProtectionFactorD();
+
+ // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionK();
+
+  // Extracts whether the FEC Unequal protection (UEP) is used for Delta
+  // frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionD();
+
+ virtual int MaxFramesFec() const;
+
+ protected:
+ uint8_t _effectivePacketLoss;
+ uint8_t _protectionFactorK;
+ uint8_t _protectionFactorD;
+  // Scale factor for the key frame protection.
+ float _scaleProtKey;
+ int32_t _maxPayloadSize;
+
+ bool _useUepProtectionK;
+ bool _useUepProtectionD;
+ float _corrFecCost;
+ VCMProtectionMethodEnum _type;
+};
+
+class VCMNackMethod : public VCMProtectionMethod {
+ public:
+ VCMNackMethod();
+ ~VCMNackMethod() override;
+ bool UpdateParameters(const VCMProtectionParameters* parameters) override;
+ // Get the effective packet loss
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
+};
+
+class VCMFecMethod : public VCMProtectionMethod {
+ public:
+ VCMFecMethod();
+ ~VCMFecMethod() override;
+ bool UpdateParameters(const VCMProtectionParameters* parameters) override;
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the FEC protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the boost for key frame protection
+ uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const;
+  // Convert the FEC rate from being relative to the total number of packets
+  // (source + FEC) to being relative to the number of source packets.
+ uint8_t ConvertFECRate(uint8_t codeRate) const;
+ // Get the average effective recovery from FEC: for random loss model
+ float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
+ // Update FEC with protectionFactorD
+ void UpdateProtectionFactorD(uint8_t protectionFactorD);
+ // Update FEC with protectionFactorK
+ void UpdateProtectionFactorK(uint8_t protectionFactorK);
+ // Compute the bits per frame. Account for temporal layers when applicable.
+ int BitsPerFrame(const VCMProtectionParameters* parameters);
+
+ protected:
+ static constexpr int kUpperLimitFramesFec = 6;
+ // Thresholds values for the bytes/frame and round trip time, below which we
+ // may turn off FEC, depending on `_numLayers` and `_maxFramesFec`.
+ // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
+ static constexpr int kMaxBytesPerFrameForFec = 700;
+ // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
+ static constexpr int kMaxBytesPerFrameForFecLow = 400;
+ // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
+ static constexpr int kMaxBytesPerFrameForFecHigh = 1000;
+
+ const RateControlSettings rate_control_settings_;
+};
+
+class VCMNackFecMethod : public VCMFecMethod {
+ public:
+ VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs);
+ ~VCMNackFecMethod() override;
+ bool UpdateParameters(const VCMProtectionParameters* parameters) override;
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the max number of frames the FEC is allowed to be based on.
+ int MaxFramesFec() const override;
+ // Turn off the FEC based on low bitrate and other factors.
+ bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
+
+ private:
+ int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+
+ int64_t _lowRttNackMs;
+ int64_t _highRttNackMs;
+ int _maxFramesFec;
+};
+
+class VCMLossProtectionLogic {
+ public:
+ explicit VCMLossProtectionLogic(int64_t nowMs);
+ ~VCMLossProtectionLogic();
+
+ // Set the protection method to be used
+ //
+ // Input:
+ // - newMethodType : New requested protection method type. If one
+ // is already set, it will be deleted and replaced
+ void SetMethod(VCMProtectionMethodEnum newMethodType);
+
+ // Update the round-trip time
+ //
+ // Input:
+  //          - rtt : Round-trip time in milliseconds.
+ void UpdateRtt(int64_t rtt);
+
+ // Update the filtered packet loss.
+ //
+ // Input:
+ // - packetLossEnc : The reported packet loss filtered
+ // (max window or average)
+ void UpdateFilteredLossPr(uint8_t packetLossEnc);
+
+ // Update the current target bit rate.
+ //
+ // Input:
+ // - bitRate : The current target bit rate in kbits/s
+ void UpdateBitRate(float bitRate);
+
+ // Update the number of packets per frame estimate, for delta frames
+ //
+ // Input:
+ // - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
+
+ // Update the number of packets per frame estimate, for key frames
+ //
+ // Input:
+  //          - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
+
+ // Update the keyFrameSize estimate
+ //
+ // Input:
+ // - keyFrameSize : The size of the latest sent key frame.
+ void UpdateKeyFrameSize(float keyFrameSize);
+
+ // Update the frame rate
+ //
+ // Input:
+ // - frameRate : The current target frame rate.
+ void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
+
+ // Update the frame size
+ //
+ // Input:
+ // - width : The codec frame width.
+ // - height : The codec frame height.
+ void UpdateFrameSize(size_t width, size_t height);
+
+ // Update the number of active layers
+ //
+ // Input:
+ // - numLayers : Number of layers used.
+ void UpdateNumLayers(int numLayers);
+
+ // The amount of packet loss to cover for with FEC.
+ //
+ // Input:
+ // - fecRateKey : Packet loss to cover for with FEC when
+ // sending key frames.
+ // - fecRateDelta : Packet loss to cover for with FEC when
+ // sending delta frames.
+ void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
+ _fecRateKey = fecRateKey;
+ _fecRateDelta = fecRateDelta;
+ }
+
+ // Update the protection methods with the current VCMProtectionParameters
+ // and set the requested protection settings.
+ // Return value : Returns true on update
+ bool UpdateMethod();
+
+ // Returns the method currently selected.
+ //
+ // Return value : The protection method currently selected.
+ VCMProtectionMethod* SelectedMethod() const;
+
+ // Return the protection type of the currently selected method
+ VCMProtectionMethodEnum SelectedType() const;
+
+ // Updates the filtered loss for the average and max window packet loss,
+ // and returns the filtered loss probability in the interval [0, 255].
+ // The returned filtered loss value depends on the parameter `filter_mode`.
+ // The input parameter `lossPr255` is the received packet loss.
+
+ // Return value : The filtered loss probability
+ uint8_t FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255);
+
+ void Reset(int64_t nowMs);
+
+ void Release();
+
+ private:
+  // Updates the max-window packet loss history with a new sample.
+ void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
+ uint8_t MaxFilteredLossPr(int64_t nowMs) const;
+ std::unique_ptr<VCMProtectionMethod> _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ int64_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ size_t _codecWidth;
+ size_t _codecHeight;
+ int _numLayers;
+};
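+
+// A minimal usage sketch (hypothetical values, not part of the API
+// contract): the owner feeds in measurements and then queries the selected
+// method, e.g.
+//
+//   VCMLossProtectionLogic logic(clock->TimeInMilliseconds());
+//   logic.SetMethod(kNackFec);
+//   logic.UpdateRtt(80);
+//   logic.UpdateBitRate(1200.0f);  // kbits/s
+//   logic.UpdateFrameRate(30.0f);
+//   logic.UpdateFrameSize(1280, 720);
+//   if (logic.UpdateMethod()) {
+//     uint8_t fec_delta = logic.SelectedMethod()->RequiredProtectionFactorD();
+//   }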
+
+} // namespace media_optimization
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester.cc b/third_party/libwebrtc/modules/video_coding/nack_requester.cc
new file mode 100644
index 0000000000..4e74032d01
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester.cc
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/nack_requester.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kMaxPacketAge = 10'000;
+constexpr int kMaxNackPackets = 1000;
+constexpr TimeDelta kDefaultRtt = TimeDelta::Millis(100);
+constexpr int kMaxNackRetries = 10;
+constexpr int kMaxReorderedPackets = 128;
+constexpr int kNumReorderingBuckets = 10;
+constexpr TimeDelta kDefaultSendNackDelay = TimeDelta::Zero();
+
+TimeDelta GetSendNackDelay(const FieldTrialsView& field_trials) {
+ int64_t delay_ms = strtol(
+ field_trials.Lookup("WebRTC-SendNackDelayMs").c_str(), nullptr, 10);
+ if (delay_ms > 0 && delay_ms <= 20) {
+ RTC_LOG(LS_INFO) << "SendNackDelay is set to " << delay_ms;
+ return TimeDelta::Millis(delay_ms);
+ }
+ return kDefaultSendNackDelay;
+}
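+
+// For example, launching with the field trial string
+// "WebRTC-SendNackDelayMs/10/" makes the lookup above return "10", so NACKs
+// for a newly detected missing packet are held back by 10 ms; values outside
+// the (0, 20] ms range fall back to kDefaultSendNackDelay (zero).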
+} // namespace
+
+constexpr TimeDelta NackPeriodicProcessor::kUpdateInterval;
+
+NackPeriodicProcessor::NackPeriodicProcessor(TimeDelta update_interval)
+ : update_interval_(update_interval) {}
+
+NackPeriodicProcessor::~NackPeriodicProcessor() {}
+
+void NackPeriodicProcessor::RegisterNackModule(NackRequesterBase* module) {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ modules_.push_back(module);
+ if (modules_.size() != 1)
+ return;
+ repeating_task_ = RepeatingTaskHandle::DelayedStart(
+ TaskQueueBase::Current(), update_interval_, [this] {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ ProcessNackModules();
+ return update_interval_;
+ });
+}
+
+void NackPeriodicProcessor::UnregisterNackModule(NackRequesterBase* module) {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ auto it = std::find(modules_.begin(), modules_.end(), module);
+ RTC_DCHECK(it != modules_.end());
+ modules_.erase(it);
+ if (modules_.empty())
+ repeating_task_.Stop();
+}
+
+void NackPeriodicProcessor::ProcessNackModules() {
+ RTC_DCHECK_RUN_ON(&sequence_);
+ for (NackRequesterBase* module : modules_)
+ module->ProcessNacks();
+}
+
+ScopedNackPeriodicProcessorRegistration::
+ ScopedNackPeriodicProcessorRegistration(NackRequesterBase* module,
+ NackPeriodicProcessor* processor)
+ : module_(module), processor_(processor) {
+ processor_->RegisterNackModule(module_);
+}
+
+ScopedNackPeriodicProcessorRegistration::
+ ~ScopedNackPeriodicProcessorRegistration() {
+ processor_->UnregisterNackModule(module_);
+}
+
+NackRequester::NackInfo::NackInfo()
+ : seq_num(0),
+ send_at_seq_num(0),
+ created_at_time(Timestamp::MinusInfinity()),
+ sent_at_time(Timestamp::MinusInfinity()),
+ retries(0) {}
+
+NackRequester::NackInfo::NackInfo(uint16_t seq_num,
+ uint16_t send_at_seq_num,
+ Timestamp created_at_time)
+ : seq_num(seq_num),
+ send_at_seq_num(send_at_seq_num),
+ created_at_time(created_at_time),
+ sent_at_time(Timestamp::MinusInfinity()),
+ retries(0) {}
+
+NackRequester::NackRequester(TaskQueueBase* current_queue,
+ NackPeriodicProcessor* periodic_processor,
+ Clock* clock,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ const FieldTrialsView& field_trials)
+ : worker_thread_(current_queue),
+ clock_(clock),
+ nack_sender_(nack_sender),
+ keyframe_request_sender_(keyframe_request_sender),
+ reordering_histogram_(kNumReorderingBuckets, kMaxReorderedPackets),
+ initialized_(false),
+ rtt_(kDefaultRtt),
+ newest_seq_num_(0),
+ send_nack_delay_(GetSendNackDelay(field_trials)),
+ processor_registration_(this, periodic_processor) {
+ RTC_DCHECK(clock_);
+ RTC_DCHECK(nack_sender_);
+ RTC_DCHECK(keyframe_request_sender_);
+ RTC_DCHECK(worker_thread_);
+ RTC_DCHECK(worker_thread_->IsCurrent());
+}
+
+NackRequester::~NackRequester() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+}
+
+void NackRequester::ProcessNacks() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ std::vector<uint16_t> nack_batch = GetNackBatch(kTimeOnly);
+ if (!nack_batch.empty()) {
+ // This batch of NACKs is triggered externally; there is no external
+ // initiator who can batch them with other feedback messages.
+ nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/false);
+ }
+}
+
+int NackRequester::OnReceivedPacket(uint16_t seq_num, bool is_keyframe) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ return OnReceivedPacket(seq_num, is_keyframe, false);
+}
+
+int NackRequester::OnReceivedPacket(uint16_t seq_num,
+ bool is_keyframe,
+ bool is_recovered) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ // TODO(philipel): When the packet includes information whether it is
+ // retransmitted or not, use that value instead. For
+ // now set it to true, which will cause the reordering
+ // statistics to never be updated.
+ bool is_retransmitted = true;
+
+ if (!initialized_) {
+ newest_seq_num_ = seq_num;
+ if (is_keyframe)
+ keyframe_list_.insert(seq_num);
+ initialized_ = true;
+ return 0;
+ }
+
+  // Since `newest_seq_num_` is a packet we have actually received, we know
+  // that packet has never been NACKed.
+ if (seq_num == newest_seq_num_)
+ return 0;
+
+ if (AheadOf(newest_seq_num_, seq_num)) {
+ // An out of order packet has been received.
+ auto nack_list_it = nack_list_.find(seq_num);
+ int nacks_sent_for_packet = 0;
+ if (nack_list_it != nack_list_.end()) {
+ nacks_sent_for_packet = nack_list_it->second.retries;
+ nack_list_.erase(nack_list_it);
+ }
+ if (!is_retransmitted)
+ UpdateReorderingStatistics(seq_num);
+ return nacks_sent_for_packet;
+ }
+
+ // Keep track of new keyframes.
+ if (is_keyframe)
+ keyframe_list_.insert(seq_num);
+
+ // And remove old ones so we don't accumulate keyframes.
+ auto it = keyframe_list_.lower_bound(seq_num - kMaxPacketAge);
+ if (it != keyframe_list_.begin())
+ keyframe_list_.erase(keyframe_list_.begin(), it);
+
+ if (is_recovered) {
+ recovered_list_.insert(seq_num);
+
+ // Remove old ones so we don't accumulate recovered packets.
+ auto it = recovered_list_.lower_bound(seq_num - kMaxPacketAge);
+ if (it != recovered_list_.begin())
+ recovered_list_.erase(recovered_list_.begin(), it);
+
+ // Do not send nack for packets recovered by FEC or RTX.
+ return 0;
+ }
+
+ AddPacketsToNack(newest_seq_num_ + 1, seq_num);
+ newest_seq_num_ = seq_num;
+
+  // Check if any NACKs were waiting for this seq_num.
+ std::vector<uint16_t> nack_batch = GetNackBatch(kSeqNumOnly);
+ if (!nack_batch.empty()) {
+ // This batch of NACKs is triggered externally; the initiator can
+ // batch them with other feedback messages.
+ nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/true);
+ }
+
+ return 0;
+}
+
+void NackRequester::ClearUpTo(uint16_t seq_num) {
+ // Called via RtpVideoStreamReceiver2::FrameContinuous on the network thread.
+ worker_thread_->PostTask(SafeTask(task_safety_.flag(), [seq_num, this]() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ nack_list_.erase(nack_list_.begin(), nack_list_.lower_bound(seq_num));
+ keyframe_list_.erase(keyframe_list_.begin(),
+ keyframe_list_.lower_bound(seq_num));
+ recovered_list_.erase(recovered_list_.begin(),
+ recovered_list_.lower_bound(seq_num));
+ }));
+}
+
+void NackRequester::UpdateRtt(int64_t rtt_ms) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ rtt_ = TimeDelta::Millis(rtt_ms);
+}
+
+bool NackRequester::RemovePacketsUntilKeyFrame() {
+ // Called on worker_thread_.
+ while (!keyframe_list_.empty()) {
+ auto it = nack_list_.lower_bound(*keyframe_list_.begin());
+
+ if (it != nack_list_.begin()) {
+ // We have found a keyframe that actually is newer than at least one
+ // packet in the nack list.
+ nack_list_.erase(nack_list_.begin(), it);
+ return true;
+ }
+
+ // If this keyframe is so old it does not remove any packets from the list,
+ // remove it from the list of keyframes and try the next keyframe.
+ keyframe_list_.erase(keyframe_list_.begin());
+ }
+ return false;
+}
+
+void NackRequester::AddPacketsToNack(uint16_t seq_num_start,
+ uint16_t seq_num_end) {
+ // Called on worker_thread_.
+ // Remove old packets.
+ auto it = nack_list_.lower_bound(seq_num_end - kMaxPacketAge);
+ nack_list_.erase(nack_list_.begin(), it);
+
+  // If the nack list is too large, remove packets from the nack list up to
+  // the first packet of the next keyframe. If the list is still too large,
+  // clear it and request a keyframe.
+ uint16_t num_new_nacks = ForwardDiff(seq_num_start, seq_num_end);
+ if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
+ while (RemovePacketsUntilKeyFrame() &&
+ nack_list_.size() + num_new_nacks > kMaxNackPackets) {
+ }
+
+ if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
+ nack_list_.clear();
+ RTC_LOG(LS_WARNING) << "NACK list full, clearing NACK"
+ " list and requesting keyframe.";
+ keyframe_request_sender_->RequestKeyFrame();
+ return;
+ }
+ }
+
+ for (uint16_t seq_num = seq_num_start; seq_num != seq_num_end; ++seq_num) {
+ // Do not send nack for packets that are already recovered by FEC or RTX
+ if (recovered_list_.find(seq_num) != recovered_list_.end())
+ continue;
+ NackInfo nack_info(seq_num, seq_num + WaitNumberOfPackets(0.5),
+ clock_->CurrentTime());
+ RTC_DCHECK(nack_list_.find(seq_num) == nack_list_.end());
+ nack_list_[seq_num] = nack_info;
+ }
+}
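+
+// A hypothetical overflow scenario for the logic above: with kMaxNackPackets
+// = 1000, a jump from sequence number 0 to 1500 first drops entries older
+// than kMaxPacketAge, then repeatedly drops entries up to the next (oldest)
+// keyframe, and only if the new range still does not fit clears the list and
+// requests a keyframe instead of NACKing.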
+
+std::vector<uint16_t> NackRequester::GetNackBatch(NackFilterOptions options) {
+ // Called on worker_thread_.
+
+ bool consider_seq_num = options != kTimeOnly;
+ bool consider_timestamp = options != kSeqNumOnly;
+ Timestamp now = clock_->CurrentTime();
+ std::vector<uint16_t> nack_batch;
+ auto it = nack_list_.begin();
+ while (it != nack_list_.end()) {
+ bool delay_timed_out = now - it->second.created_at_time >= send_nack_delay_;
+ bool nack_on_rtt_passed = now - it->second.sent_at_time >= rtt_;
+ bool nack_on_seq_num_passed =
+ it->second.sent_at_time.IsInfinite() &&
+ AheadOrAt(newest_seq_num_, it->second.send_at_seq_num);
+ if (delay_timed_out && ((consider_seq_num && nack_on_seq_num_passed) ||
+ (consider_timestamp && nack_on_rtt_passed))) {
+ nack_batch.emplace_back(it->second.seq_num);
+ ++it->second.retries;
+ it->second.sent_at_time = now;
+ if (it->second.retries >= kMaxNackRetries) {
+ RTC_LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
+ << " removed from NACK list due to max retries.";
+ it = nack_list_.erase(it);
+ } else {
+ ++it;
+ }
+ continue;
+ }
+ ++it;
+ }
+ return nack_batch;
+}
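+
+// An illustration of the two triggers above (hypothetical timing): an entry
+// created with send_at_seq_num = N is first NACKed once a packet with
+// sequence number >= N arrives (the kSeqNumOnly path) and send_nack_delay_
+// has elapsed; after that first send it is re-NACKed at most once per rtt_
+// (the kTimeOnly path) until the packet arrives, is cleared via ClearUpTo(),
+// or hits kMaxNackRetries.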
+
+void NackRequester::UpdateReorderingStatistics(uint16_t seq_num) {
+ // Running on worker_thread_.
+ RTC_DCHECK(AheadOf(newest_seq_num_, seq_num));
+ uint16_t diff = ReverseDiff(newest_seq_num_, seq_num);
+ reordering_histogram_.Add(diff);
+}
+
+int NackRequester::WaitNumberOfPackets(float probability) const {
+ // Called on worker_thread_;
+ if (reordering_histogram_.NumValues() == 0)
+ return 0;
+ return reordering_histogram_.InverseCdf(probability);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester.h b/third_party/libwebrtc/modules/video_coding/nack_requester.h
new file mode 100644
index 0000000000..c860787dcf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_NACK_REQUESTER_H_
+#define MODULES_VIDEO_CODING_NACK_REQUESTER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/include/module_common_types.h"
+#include "modules/video_coding/histogram.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class NackRequesterBase {
+ public:
+ virtual ~NackRequesterBase() = default;
+ virtual void ProcessNacks() = 0;
+};
+
+class NackPeriodicProcessor {
+ public:
+ static constexpr TimeDelta kUpdateInterval = TimeDelta::Millis(20);
+ explicit NackPeriodicProcessor(TimeDelta update_interval = kUpdateInterval);
+ ~NackPeriodicProcessor();
+ void RegisterNackModule(NackRequesterBase* module);
+ void UnregisterNackModule(NackRequesterBase* module);
+
+ private:
+ void ProcessNackModules() RTC_RUN_ON(sequence_);
+
+ const TimeDelta update_interval_;
+ RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(sequence_);
+ std::vector<NackRequesterBase*> modules_ RTC_GUARDED_BY(sequence_);
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_;
+};
+
+class ScopedNackPeriodicProcessorRegistration {
+ public:
+ ScopedNackPeriodicProcessorRegistration(NackRequesterBase* module,
+ NackPeriodicProcessor* processor);
+ ~ScopedNackPeriodicProcessorRegistration();
+
+ private:
+ NackRequesterBase* const module_;
+ NackPeriodicProcessor* const processor_;
+};
+
+class NackRequester final : public NackRequesterBase {
+ public:
+ NackRequester(TaskQueueBase* current_queue,
+ NackPeriodicProcessor* periodic_processor,
+ Clock* clock,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ const FieldTrialsView& field_trials);
+ ~NackRequester();
+
+ void ProcessNacks() override;
+
+ int OnReceivedPacket(uint16_t seq_num, bool is_keyframe);
+ int OnReceivedPacket(uint16_t seq_num, bool is_keyframe, bool is_recovered);
+
+ void ClearUpTo(uint16_t seq_num);
+ void UpdateRtt(int64_t rtt_ms);
+
+ private:
+ // Which fields to consider when deciding which packet to nack in
+ // GetNackBatch.
+ enum NackFilterOptions { kSeqNumOnly, kTimeOnly, kSeqNumAndTime };
+
+  // This struct holds the sequence number of a packet in the NACK list, as
+  // well as metadata about when it should be NACKed and how many times we
+  // have tried to NACK it.
+ struct NackInfo {
+ NackInfo();
+ NackInfo(uint16_t seq_num,
+ uint16_t send_at_seq_num,
+ Timestamp created_at_time);
+
+ uint16_t seq_num;
+ uint16_t send_at_seq_num;
+ Timestamp created_at_time;
+ Timestamp sent_at_time;
+ int retries;
+ };
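+
+  // A hypothetical example of the fields above: if packet 10 is detected as
+  // missing when packet 11 arrives and WaitNumberOfPackets(0.5) returns 2,
+  // the entry is created with seq_num = 10 and send_at_seq_num = 12, so the
+  // first NACK waits until a packet with sequence number >= 12 is received.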
+
+ void AddPacketsToNack(uint16_t seq_num_start, uint16_t seq_num_end)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ // Removes packets from the nack list until the next keyframe. Returns true
+ // if packets were removed.
+ bool RemovePacketsUntilKeyFrame()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+ std::vector<uint16_t> GetNackBatch(NackFilterOptions options)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ // Update the reordering distribution.
+ void UpdateReorderingStatistics(uint16_t seq_num)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+  // Returns how many packets we have to wait for in order to receive the
+  // packet with probability `probability` or higher.
+ int WaitNumberOfPackets(float probability) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);
+
+ TaskQueueBase* const worker_thread_;
+ Clock* const clock_;
+ NackSender* const nack_sender_;
+ KeyFrameRequestSender* const keyframe_request_sender_;
+
+ // TODO(philipel): Some of the variables below are consistently used on a
+ // known thread (e.g. see `initialized_`). Those probably do not need
+ // synchronized access.
+ std::map<uint16_t, NackInfo, DescendingSeqNumComp<uint16_t>> nack_list_
+ RTC_GUARDED_BY(worker_thread_);
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> keyframe_list_
+ RTC_GUARDED_BY(worker_thread_);
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> recovered_list_
+ RTC_GUARDED_BY(worker_thread_);
+ video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(worker_thread_);
+ bool initialized_ RTC_GUARDED_BY(worker_thread_);
+ TimeDelta rtt_ RTC_GUARDED_BY(worker_thread_);
+ uint16_t newest_seq_num_ RTC_GUARDED_BY(worker_thread_);
+
+  // Delay added before sending a NACK for a newly detected missing packet.
+ const TimeDelta send_nack_delay_;
+
+ ScopedNackPeriodicProcessorRegistration processor_registration_;
+
+ // Used to signal destruction to potentially pending tasks.
+ ScopedTaskSafety task_safety_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_NACK_REQUESTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build b/third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build
new file mode 100644
index 0000000000..33e763a48d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester_gn/moz.build
@@ -0,0 +1,233 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/histogram.cc",
+ "/third_party/libwebrtc/modules/video_coding/nack_requester.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("nack_requester_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc b/third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc
new file mode 100644
index 0000000000..6f11cb6e91
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/nack_requester_unittest.cc
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/nack_requester.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/run_loop.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+// TODO(bugs.webrtc.org/11594): Use the GlobalSimulatedTimeController instead
+// of RunLoop. At the moment we mix use of the Clock and the underlying
+// implementation of RunLoop, which is realtime.
+class TestNackRequester : public ::testing::Test,
+ public NackSender,
+ public KeyFrameRequestSender {
+ protected:
+ TestNackRequester()
+ : clock_(new SimulatedClock(0)), keyframes_requested_(0) {}
+
+ void SetUp() override {}
+
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override {
+ sent_nacks_.insert(sent_nacks_.end(), sequence_numbers.begin(),
+ sequence_numbers.end());
+ if (waiting_for_send_nack_) {
+ waiting_for_send_nack_ = false;
+ loop_.Quit();
+ }
+ }
+
+ void RequestKeyFrame() override { ++keyframes_requested_; }
+
+ void Flush() {
+ loop_.Flush();
+ }
+
+ bool WaitForSendNack() {
+ if (timed_out_) {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ RTC_DCHECK(!waiting_for_send_nack_);
+
+ waiting_for_send_nack_ = true;
+ loop_.task_queue()->PostDelayedTask(
+ [this]() {
+ timed_out_ = true;
+ loop_.Quit();
+ },
+ TimeDelta::Seconds(1));
+
+ loop_.Run();
+
+ if (timed_out_)
+ return false;
+
+ RTC_DCHECK(!waiting_for_send_nack_);
+ return true;
+ }
+
+ NackRequester& CreateNackModule(
+ TimeDelta interval = NackPeriodicProcessor::kUpdateInterval) {
+ RTC_DCHECK(!nack_module_.get());
+ nack_periodic_processor_ =
+ std::make_unique<NackPeriodicProcessor>(interval);
+ test::ScopedKeyValueConfig empty_field_trials_;
+ nack_module_ = std::make_unique<NackRequester>(
+ TaskQueueBase::Current(), nack_periodic_processor_.get(), clock_.get(),
+ this, this, empty_field_trials_);
+ nack_module_->UpdateRtt(kDefaultRttMs);
+ return *nack_module_.get();
+ }
+
+ static constexpr int64_t kDefaultRttMs = 20;
+ rtc::AutoThread main_thread_;
+ test::RunLoop loop_;
+ std::unique_ptr<SimulatedClock> clock_;
+ std::unique_ptr<NackPeriodicProcessor> nack_periodic_processor_;
+ std::unique_ptr<NackRequester> nack_module_;
+ std::vector<uint16_t> sent_nacks_;
+ int keyframes_requested_;
+ bool waiting_for_send_nack_ = false;
+ bool timed_out_ = false;
+};
+
+TEST_F(TestNackRequester, NackOnePacket) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(3, false, false);
+ ASSERT_EQ(1u, sent_nacks_.size());
+ EXPECT_EQ(2, sent_nacks_[0]);
+}
+
+TEST_F(TestNackRequester, WrappingSeqNum) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0xfffe, false, false);
+ nack_module.OnReceivedPacket(1, false, false);
+ ASSERT_EQ(2u, sent_nacks_.size());
+ EXPECT_EQ(0xffff, sent_nacks_[0]);
+ EXPECT_EQ(0, sent_nacks_[1]);
+}
+
+TEST_F(TestNackRequester, WrappingSeqNumClearToKeyframe) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(10));
+ nack_module.OnReceivedPacket(0xfffe, false, false);
+ nack_module.OnReceivedPacket(1, false, false);
+ ASSERT_EQ(2u, sent_nacks_.size());
+ EXPECT_EQ(0xffff, sent_nacks_[0]);
+ EXPECT_EQ(0, sent_nacks_[1]);
+
+ sent_nacks_.clear();
+ nack_module.OnReceivedPacket(2, true, false);
+ ASSERT_EQ(0u, sent_nacks_.size());
+
+ nack_module.OnReceivedPacket(501, true, false);
+ ASSERT_EQ(498u, sent_nacks_.size());
+ for (int seq_num = 3; seq_num < 501; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 3]);
+
+ sent_nacks_.clear();
+ nack_module.OnReceivedPacket(1001, false, false);
+ EXPECT_EQ(499u, sent_nacks_.size());
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 502]);
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ ASSERT_TRUE(WaitForSendNack());
+ ASSERT_EQ(999u, sent_nacks_.size());
+ EXPECT_EQ(0xffff, sent_nacks_[0]);
+ EXPECT_EQ(0, sent_nacks_[1]);
+ for (int seq_num = 3; seq_num < 501; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 1]);
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 2]);
+
+  // Adding packet 1004 will cause the nack list to reach its maximum size.
+  // It will then clear all nacks up to the next keyframe (seq num 2),
+  // thus removing 0xffff and 0 from the nack list.
+ sent_nacks_.clear();
+ nack_module.OnReceivedPacket(1004, false, false);
+ ASSERT_EQ(2u, sent_nacks_.size());
+ EXPECT_EQ(1002, sent_nacks_[0]);
+ EXPECT_EQ(1003, sent_nacks_[1]);
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ ASSERT_TRUE(WaitForSendNack());
+ ASSERT_EQ(999u, sent_nacks_.size());
+ for (int seq_num = 3; seq_num < 501; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 3]);
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 4]);
+
+ // Adding packet 1007 will cause the nack module to overflow again, thus
+ // clearing everything up to 501 which is the next keyframe.
+ nack_module.OnReceivedPacket(1007, false, false);
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ ASSERT_TRUE(WaitForSendNack());
+ ASSERT_EQ(503u, sent_nacks_.size());
+ for (int seq_num = 502; seq_num < 1001; ++seq_num)
+ EXPECT_EQ(seq_num, sent_nacks_[seq_num - 502]);
+ EXPECT_EQ(1005, sent_nacks_[501]);
+ EXPECT_EQ(1006, sent_nacks_[502]);
+}
+
+TEST_F(TestNackRequester, ResendNack) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(3, false, false);
+ size_t expected_nacks_sent = 1;
+ ASSERT_EQ(expected_nacks_sent, sent_nacks_.size());
+ EXPECT_EQ(2, sent_nacks_[0]);
+
+ nack_module.UpdateRtt(1);
+ clock_->AdvanceTimeMilliseconds(1);
+ WaitForSendNack(); // Fast retransmit allowed.
+ EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
+
+  // By default, each retry has to wait at least one RTT.
+ for (int i = 2; i < 10; ++i) {
+ // Change RTT, above the 40ms max for exponential backoff.
+ TimeDelta rtt = TimeDelta::Millis(160); // + (i * 10 - 40)
+ nack_module.UpdateRtt(rtt.ms());
+
+ // RTT gets capped at 160ms in backoff calculations.
+ TimeDelta expected_backoff_delay =
+ (i - 1) * std::min(rtt, TimeDelta::Millis(160));
+
+ // Move to one millisecond before next allowed NACK.
+ clock_->AdvanceTimeMilliseconds(expected_backoff_delay.ms() - 1);
+ Flush();
+ EXPECT_EQ(expected_nacks_sent, sent_nacks_.size());
+
+ // Move to one millisecond after next allowed NACK.
+ // After rather than on to avoid rounding errors.
+ clock_->AdvanceTimeMilliseconds(2);
+ WaitForSendNack(); // Now allowed.
+ EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size());
+ }
+
+ // Giving up after 10 tries.
+ clock_->AdvanceTimeMilliseconds(3000);
+ Flush();
+ EXPECT_EQ(expected_nacks_sent, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, ResendPacketMaxRetries) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(3, false, false);
+ ASSERT_EQ(1u, sent_nacks_.size());
+ EXPECT_EQ(2, sent_nacks_[0]);
+
+ int backoff_factor = 1;
+ for (size_t retries = 1; retries < 10; ++retries) {
+    // Exponential backoff, so that we don't reject the NACK because of timing.
+ clock_->AdvanceTimeMilliseconds(backoff_factor * kDefaultRttMs);
+ backoff_factor *= 2;
+ WaitForSendNack();
+ EXPECT_EQ(retries + 1, sent_nacks_.size());
+ }
+
+ clock_->AdvanceTimeMilliseconds(backoff_factor * kDefaultRttMs);
+ Flush();
+ EXPECT_EQ(10u, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, TooLargeNackList) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(1001, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(0, keyframes_requested_);
+ nack_module.OnReceivedPacket(1003, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(1, keyframes_requested_);
+ nack_module.OnReceivedPacket(1004, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(1, keyframes_requested_);
+}
+
+TEST_F(TestNackRequester, TooLargeNackListWithKeyFrame) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(1, true, false);
+ nack_module.OnReceivedPacket(1001, false, false);
+ EXPECT_EQ(999u, sent_nacks_.size());
+ EXPECT_EQ(0, keyframes_requested_);
+ nack_module.OnReceivedPacket(1003, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(0, keyframes_requested_);
+ nack_module.OnReceivedPacket(1005, false, false);
+ EXPECT_EQ(1000u, sent_nacks_.size());
+ EXPECT_EQ(1, keyframes_requested_);
+}
+
+TEST_F(TestNackRequester, ClearUpTo) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(100, false, false);
+ EXPECT_EQ(99u, sent_nacks_.size());
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ nack_module.ClearUpTo(50);
+ WaitForSendNack();
+ ASSERT_EQ(50u, sent_nacks_.size());
+ EXPECT_EQ(50, sent_nacks_[0]);
+}
+
+TEST_F(TestNackRequester, ClearUpToWrap) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0xfff0, false, false);
+ nack_module.OnReceivedPacket(0xf, false, false);
+ EXPECT_EQ(30u, sent_nacks_.size());
+
+ sent_nacks_.clear();
+ clock_->AdvanceTimeMilliseconds(100);
+ nack_module.ClearUpTo(0);
+ WaitForSendNack();
+ ASSERT_EQ(15u, sent_nacks_.size());
+ EXPECT_EQ(0, sent_nacks_[0]);
+}
+
+TEST_F(TestNackRequester, PacketNackCount) {
+ NackRequester& nack_module = CreateNackModule(TimeDelta::Millis(1));
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(0, false, false));
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(2, false, false));
+ EXPECT_EQ(1, nack_module.OnReceivedPacket(1, false, false));
+
+ sent_nacks_.clear();
+ nack_module.UpdateRtt(100);
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(5, false, false));
+ clock_->AdvanceTimeMilliseconds(100);
+ WaitForSendNack();
+ EXPECT_EQ(4u, sent_nacks_.size());
+
+ clock_->AdvanceTimeMilliseconds(125);
+ WaitForSendNack();
+
+ EXPECT_EQ(6u, sent_nacks_.size());
+
+ EXPECT_EQ(3, nack_module.OnReceivedPacket(3, false, false));
+ EXPECT_EQ(3, nack_module.OnReceivedPacket(4, false, false));
+ EXPECT_EQ(0, nack_module.OnReceivedPacket(4, false, false));
+}
+
+TEST_F(TestNackRequester, NackListFullAndNoOverlapWithKeyframes) {
+ NackRequester& nack_module = CreateNackModule();
+ const int kMaxNackPackets = 1000;
+ const unsigned int kFirstGap = kMaxNackPackets - 20;
+ const unsigned int kSecondGap = 200;
+ uint16_t seq_num = 0;
+ nack_module.OnReceivedPacket(seq_num++, true, false);
+ seq_num += kFirstGap;
+ nack_module.OnReceivedPacket(seq_num++, true, false);
+ EXPECT_EQ(kFirstGap, sent_nacks_.size());
+ sent_nacks_.clear();
+ seq_num += kSecondGap;
+ nack_module.OnReceivedPacket(seq_num, true, false);
+ EXPECT_EQ(kSecondGap, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, HandleFecRecoveredPacket) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(1, false, false);
+ nack_module.OnReceivedPacket(4, false, true);
+ EXPECT_EQ(0u, sent_nacks_.size());
+ nack_module.OnReceivedPacket(5, false, false);
+ EXPECT_EQ(2u, sent_nacks_.size());
+}
+
+TEST_F(TestNackRequester, SendNackWithoutDelay) {
+ NackRequester& nack_module = CreateNackModule();
+ nack_module.OnReceivedPacket(0, false, false);
+ nack_module.OnReceivedPacket(100, false, false);
+ EXPECT_EQ(99u, sent_nacks_.size());
+}
+
+class TestNackRequesterWithFieldTrial : public ::testing::Test,
+ public NackSender,
+ public KeyFrameRequestSender {
+ protected:
+ TestNackRequesterWithFieldTrial()
+ : nack_delay_field_trial_("WebRTC-SendNackDelayMs/10/"),
+ clock_(new SimulatedClock(0)),
+ nack_module_(TaskQueueBase::Current(),
+ &nack_periodic_processor_,
+ clock_.get(),
+ this,
+ this,
+ nack_delay_field_trial_),
+ keyframes_requested_(0) {}
+
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override {
+ sent_nacks_.insert(sent_nacks_.end(), sequence_numbers.begin(),
+ sequence_numbers.end());
+ }
+
+ void RequestKeyFrame() override { ++keyframes_requested_; }
+
+ test::ScopedKeyValueConfig nack_delay_field_trial_;
+ rtc::AutoThread main_thread_;
+ std::unique_ptr<SimulatedClock> clock_;
+ NackPeriodicProcessor nack_periodic_processor_;
+ NackRequester nack_module_;
+ std::vector<uint16_t> sent_nacks_;
+ int keyframes_requested_;
+};
+
+TEST_F(TestNackRequesterWithFieldTrial, SendNackWithDelay) {
+ nack_module_.OnReceivedPacket(0, false, false);
+ nack_module_.OnReceivedPacket(100, false, false);
+ EXPECT_EQ(0u, sent_nacks_.size());
+ clock_->AdvanceTimeMilliseconds(10);
+ nack_module_.OnReceivedPacket(106, false, false);
+ EXPECT_EQ(99u, sent_nacks_.size());
+ clock_->AdvanceTimeMilliseconds(10);
+ nack_module_.OnReceivedPacket(109, false, false);
+ EXPECT_EQ(104u, sent_nacks_.size());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/packet.cc b/third_party/libwebrtc/modules/video_coding/packet.cc
new file mode 100644
index 0000000000..f1bac4a305
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/packet.h"
+
+#include "api/rtp_headers.h"
+
+namespace webrtc {
+
+VCMPacket::VCMPacket()
+ : payloadType(0),
+ timestamp(0),
+ ntp_time_ms_(0),
+ seqNum(0),
+ dataPtr(NULL),
+ sizeBytes(0),
+ markerBit(false),
+ timesNacked(-1),
+ completeNALU(kNaluUnset),
+ insertStartCode(false),
+ video_header() {
+ video_header.playout_delay = {-1, -1};
+}
+
+VCMPacket::VCMPacket(const uint8_t* ptr,
+ size_t size,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& videoHeader,
+ int64_t ntp_time_ms,
+ Timestamp receive_time)
+ : payloadType(rtp_header.payloadType),
+ timestamp(rtp_header.timestamp),
+ ntp_time_ms_(ntp_time_ms),
+ seqNum(rtp_header.sequenceNumber),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(rtp_header.markerBit),
+ timesNacked(-1),
+ completeNALU(kNaluIncomplete),
+ insertStartCode(videoHeader.codec == kVideoCodecH264 &&
+ videoHeader.is_first_packet_in_frame),
+ video_header(videoHeader),
+ packet_info(rtp_header, receive_time) {
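+  // Classify NALU completeness from the frame boundary flags and the RTP
+  // marker bit: a single-packet frame is complete, the first packet of a
+  // multi-packet frame is a start, and the marker packet is an end.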
+ if (is_first_packet_in_frame() && markerBit) {
+ completeNALU = kNaluComplete;
+ } else if (is_first_packet_in_frame()) {
+ completeNALU = kNaluStart;
+ } else if (markerBit) {
+ completeNALU = kNaluEnd;
+ } else {
+ completeNALU = kNaluIncomplete;
+ }
+
+ // Playout decisions are made entirely based on first packet in a frame.
+ if (!is_first_packet_in_frame()) {
+ video_header.playout_delay = {-1, -1};
+ }
+}
+
+VCMPacket::~VCMPacket() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/packet.h b/third_party/libwebrtc/modules/video_coding/packet.h
new file mode 100644
index 0000000000..9aa2d5ce08
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_PACKET_H_
+#define MODULES_VIDEO_CODING_PACKET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_packet_info.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_frame_type.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+// Used to indicate if a received packet contains a complete NALU (or equivalent).
+enum VCMNaluCompleteness {
+ kNaluUnset = 0, // Packet has not been filled.
+ kNaluComplete = 1, // Packet can be decoded as is.
+  kNaluStart,       // Packet contains the beginning of a NALU.
+  kNaluIncomplete,  // Packet is neither the beginning nor the end of a NALU.
+  kNaluEnd,         // Packet is the end of a NALU.
+};
+
+class VCMPacket {
+ public:
+ VCMPacket();
+
+ VCMPacket(const uint8_t* ptr,
+ size_t size,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header,
+ int64_t ntp_time_ms,
+ Timestamp receive_time);
+
+ ~VCMPacket();
+
+ VideoCodecType codec() const { return video_header.codec; }
+ int width() const { return video_header.width; }
+ int height() const { return video_header.height; }
+
+ bool is_first_packet_in_frame() const {
+ return video_header.is_first_packet_in_frame;
+ }
+ bool is_last_packet_in_frame() const {
+ return video_header.is_last_packet_in_frame;
+ }
+
+ uint8_t payloadType;
+ uint32_t timestamp;
+ // NTP time of the capture time in local timebase in milliseconds.
+ int64_t ntp_time_ms_;
+ uint16_t seqNum;
+ const uint8_t* dataPtr;
+ size_t sizeBytes;
+ bool markerBit;
+ int timesNacked;
+
+ VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
+ bool insertStartCode; // True if a start code should be inserted before this
+ // packet.
+ RTPVideoHeader video_header;
+ absl::optional<RtpGenericFrameDescriptor> generic_descriptor;
+
+ RtpPacketInfo packet_info;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_PACKET_H_
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/packet_buffer.cc
new file mode 100644
index 0000000000..04f02fce97
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/packet_buffer.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "absl/types/variant.h"
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/video/video_frame_type.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/mod_ops.h"
+
+namespace webrtc {
+namespace video_coding {
+
+PacketBuffer::Packet::Packet(const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video_header)
+ : marker_bit(rtp_packet.Marker()),
+ payload_type(rtp_packet.PayloadType()),
+ seq_num(rtp_packet.SequenceNumber()),
+ timestamp(rtp_packet.Timestamp()),
+ times_nacked(-1),
+ video_header(video_header) {}
+
+PacketBuffer::PacketBuffer(size_t start_buffer_size, size_t max_buffer_size)
+ : max_size_(max_buffer_size),
+ first_seq_num_(0),
+ first_packet_received_(false),
+ is_cleared_to_first_seq_num_(false),
+ buffer_(start_buffer_size),
+ sps_pps_idr_is_h264_keyframe_(false) {
+ RTC_DCHECK_LE(start_buffer_size, max_buffer_size);
+ // Buffer size must always be a power of 2.
+ RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0);
+ RTC_DCHECK((max_buffer_size & (max_buffer_size - 1)) == 0);
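+  // (A power of two has exactly one bit set, so `x & (x - 1)` is zero only
+  // for powers of two and for zero.)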
+}
+
+PacketBuffer::~PacketBuffer() {
+ Clear();
+}
+
+PacketBuffer::InsertResult PacketBuffer::InsertPacket(
+ std::unique_ptr<PacketBuffer::Packet> packet) {
+ PacketBuffer::InsertResult result;
+
+ uint16_t seq_num = packet->seq_num;
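+  // The buffer is used as a ring: a packet's slot is its sequence number
+  // modulo the (power-of-two) buffer size.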
+ size_t index = seq_num % buffer_.size();
+
+ if (!first_packet_received_) {
+ first_seq_num_ = seq_num;
+ first_packet_received_ = true;
+ } else if (AheadOf(first_seq_num_, seq_num)) {
+    // If we have explicitly cleared past this packet then it's old;
+    // don't insert it, just silently ignore it.
+ if (is_cleared_to_first_seq_num_) {
+ return result;
+ }
+
+ first_seq_num_ = seq_num;
+ }
+
+ if (buffer_[index] != nullptr) {
+ // Duplicate packet, just delete the payload.
+ if (buffer_[index]->seq_num == packet->seq_num) {
+ return result;
+ }
+
+ // The packet buffer is full, try to expand the buffer.
+ while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()] != nullptr) {
+ }
+ index = seq_num % buffer_.size();
+
+ // Packet buffer is still full since we were unable to expand the buffer.
+ if (buffer_[index] != nullptr) {
+      // Clear the buffer, delete payloads, and set `buffer_cleared` to signal
+      // that a new keyframe is needed.
+ RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
+ ClearInternal();
+ result.buffer_cleared = true;
+ return result;
+ }
+ }
+
+ packet->continuous = false;
+ buffer_[index] = std::move(packet);
+
+ UpdateMissingPackets(seq_num);
+
+ received_padding_.erase(
+ received_padding_.begin(),
+ received_padding_.lower_bound(seq_num - (buffer_.size() / 4)));
+
+ result.packets = FindFrames(seq_num);
+ return result;
+}
+
+uint32_t PacketBuffer::ClearTo(uint16_t seq_num) {
+ // We have already cleared past this sequence number, no need to do anything.
+ if (is_cleared_to_first_seq_num_ &&
+ AheadOf<uint16_t>(first_seq_num_, seq_num)) {
+ return 0;
+ }
+
+  // The packet buffer may have been cleared between when a frame was created
+  // and when it was returned.
+ if (!first_packet_received_)
+ return 0;
+
+  // Avoid iterating over the buffer more than once by capping the number of
+  // iterations to the size of the buffer.
+ ++seq_num;
+ uint32_t num_cleared_packets = 0;
+ size_t diff = ForwardDiff<uint16_t>(first_seq_num_, seq_num);
+ size_t iterations = std::min(diff, buffer_.size());
+ for (size_t i = 0; i < iterations; ++i) {
+ auto& stored = buffer_[first_seq_num_ % buffer_.size()];
+ if (stored != nullptr && AheadOf<uint16_t>(seq_num, stored->seq_num)) {
+ ++num_cleared_packets;
+ stored = nullptr;
+ }
+ ++first_seq_num_;
+ }
+
+  // If `diff` is larger than `iterations`, the loop above did not advance
+  // `first_seq_num_` all the way to `seq_num`, so set it here.
+ first_seq_num_ = seq_num;
+
+ is_cleared_to_first_seq_num_ = true;
+ missing_packets_.erase(missing_packets_.begin(),
+ missing_packets_.lower_bound(seq_num));
+
+ received_padding_.erase(received_padding_.begin(),
+ received_padding_.lower_bound(seq_num));
+
+ return num_cleared_packets;
+}
+
+void PacketBuffer::Clear() {
+ ClearInternal();
+}
+
+PacketBuffer::InsertResult PacketBuffer::InsertPadding(uint16_t seq_num) {
+ PacketBuffer::InsertResult result;
+ UpdateMissingPackets(seq_num);
+ received_padding_.insert(seq_num);
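+  // Padding carries no media of its own; re-run frame finding from the next
+  // sequence number in case the padding bridged a gap.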
+ result.packets = FindFrames(static_cast<uint16_t>(seq_num + 1));
+ return result;
+}
+
+void PacketBuffer::ForceSpsPpsIdrIsH264Keyframe() {
+ sps_pps_idr_is_h264_keyframe_ = true;
+}
+
+void PacketBuffer::ResetSpsPpsIdrIsH264Keyframe() {
+ sps_pps_idr_is_h264_keyframe_ = false;
+}
+
+void PacketBuffer::ClearInternal() {
+ for (auto& entry : buffer_) {
+ entry = nullptr;
+ }
+
+ first_packet_received_ = false;
+ is_cleared_to_first_seq_num_ = false;
+ newest_inserted_seq_num_.reset();
+ missing_packets_.clear();
+ received_padding_.clear();
+}
+
+bool PacketBuffer::ExpandBufferSize() {
+ if (buffer_.size() == max_size_) {
+ RTC_LOG(LS_WARNING) << "PacketBuffer is already at max size (" << max_size_
+ << "), failed to increase size.";
+ return false;
+ }
+
+ size_t new_size = std::min(max_size_, 2 * buffer_.size());
+ std::vector<std::unique_ptr<Packet>> new_buffer(new_size);
+ for (std::unique_ptr<Packet>& entry : buffer_) {
+ if (entry != nullptr) {
+ new_buffer[entry->seq_num % new_size] = std::move(entry);
+ }
+ }
+ buffer_ = std::move(new_buffer);
+ RTC_LOG(LS_INFO) << "PacketBuffer size expanded to " << new_size;
+ return true;
+}
+
+bool PacketBuffer::PotentialNewFrame(uint16_t seq_num) const {
+ size_t index = seq_num % buffer_.size();
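+  // The previous slot wraps around to the end of the ring when `index` is 0.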
+ int prev_index = index > 0 ? index - 1 : buffer_.size() - 1;
+ const auto& entry = buffer_[index];
+ const auto& prev_entry = buffer_[prev_index];
+
+ if (entry == nullptr)
+ return false;
+ if (entry->seq_num != seq_num)
+ return false;
+ if (entry->is_first_packet_in_frame())
+ return true;
+ if (prev_entry == nullptr)
+ return false;
+ if (prev_entry->seq_num != static_cast<uint16_t>(entry->seq_num - 1))
+ return false;
+ if (prev_entry->timestamp != entry->timestamp)
+ return false;
+ if (prev_entry->continuous)
+ return true;
+
+ return false;
+}
+
+std::vector<std::unique_ptr<PacketBuffer::Packet>> PacketBuffer::FindFrames(
+ uint16_t seq_num) {
+ std::vector<std::unique_ptr<PacketBuffer::Packet>> found_frames;
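+  // Remember where the scan started so padding records inside an assembled
+  // range can be erased once the frame's packets are returned.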
+ auto start = seq_num;
+
+ for (size_t i = 0; i < buffer_.size(); ++i) {
+ if (received_padding_.find(seq_num) != received_padding_.end()) {
+ seq_num += 1;
+ continue;
+ }
+
+ if (!PotentialNewFrame(seq_num)) {
+ break;
+ }
+
+ size_t index = seq_num % buffer_.size();
+ buffer_[index]->continuous = true;
+
+    // If all packets of the frame are continuous, find the first packet of
+    // the frame and add all of the frame's packets to the returned packets.
+ if (buffer_[index]->is_last_packet_in_frame()) {
+ uint16_t start_seq_num = seq_num;
+
+ // Find the start index by searching backward until the packet with
+ // the `frame_begin` flag is set.
+ int start_index = index;
+ size_t tested_packets = 0;
+ int64_t frame_timestamp = buffer_[start_index]->timestamp;
+
+ // Identify H.264 keyframes by means of SPS, PPS, and IDR.
+ bool is_h264 = buffer_[start_index]->codec() == kVideoCodecH264;
+ bool has_h264_sps = false;
+ bool has_h264_pps = false;
+ bool has_h264_idr = false;
+ bool is_h264_keyframe = false;
+ int idr_width = -1;
+ int idr_height = -1;
+ bool full_frame_found = false;
+ while (true) {
+ ++tested_packets;
+
+ if (!is_h264) {
+ if (buffer_[start_index] == nullptr ||
+ buffer_[start_index]->is_first_packet_in_frame()) {
+ full_frame_found = buffer_[start_index] != nullptr;
+ break;
+ }
+ }
+
+ if (is_h264) {
+ const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
+ &buffer_[start_index]->video_header.video_type_header);
+ if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
+ return found_frames;
+
+ for (size_t j = 0; j < h264_header->nalus_length; ++j) {
+ if (h264_header->nalus[j].type == H264::NaluType::kSps) {
+ has_h264_sps = true;
+ } else if (h264_header->nalus[j].type == H264::NaluType::kPps) {
+ has_h264_pps = true;
+ } else if (h264_header->nalus[j].type == H264::NaluType::kIdr) {
+ has_h264_idr = true;
+ }
+ }
+ if ((sps_pps_idr_is_h264_keyframe_ && has_h264_idr && has_h264_sps &&
+ has_h264_pps) ||
+ (!sps_pps_idr_is_h264_keyframe_ && has_h264_idr)) {
+ is_h264_keyframe = true;
+          // Store the keyframe's resolution, taken from the packet with the
+          // smallest index and a valid resolution; typically the IDR or SPS
+          // packet. Packets preceding it will have the IDR's resolution
+          // applied to them.
+ if (buffer_[start_index]->width() > 0 &&
+ buffer_[start_index]->height() > 0) {
+ idr_width = buffer_[start_index]->width();
+ idr_height = buffer_[start_index]->height();
+ }
+ }
+ }
+
+ if (tested_packets == buffer_.size())
+ break;
+
+ start_index = start_index > 0 ? start_index - 1 : buffer_.size() - 1;
+
+ // In the case of H264 we don't have a frame_begin bit (yes,
+ // `frame_begin` might be set to true but that is a lie). So instead
+      // we traverse backwards as long as we have a previous packet and
+ // the timestamp of that packet is the same as this one. This may cause
+ // the PacketBuffer to hand out incomplete frames.
+ // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7106
+ if (is_h264 && (buffer_[start_index] == nullptr ||
+ buffer_[start_index]->timestamp != frame_timestamp)) {
+ break;
+ }
+
+ --start_seq_num;
+ }
+
+ if (is_h264) {
+ // Warn if this is an unsafe frame.
+ if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
+ RTC_LOG(LS_WARNING)
+ << "Received H.264-IDR frame "
+ "(SPS: "
+ << has_h264_sps << ", PPS: " << has_h264_pps << "). Treating as "
+ << (sps_pps_idr_is_h264_keyframe_ ? "delta" : "key")
+ << " frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
+ << (sps_pps_idr_is_h264_keyframe_ ? "enabled." : "disabled");
+ }
+
+ // Now that we have decided whether to treat this frame as a key frame
+ // or delta frame in the frame buffer, we update the field that
+ // determines if the RtpFrameObject is a key frame or delta frame.
+ const size_t first_packet_index = start_seq_num % buffer_.size();
+ if (is_h264_keyframe) {
+ buffer_[first_packet_index]->video_header.frame_type =
+ VideoFrameType::kVideoFrameKey;
+ if (idr_width > 0 && idr_height > 0) {
+ // IDR frame was finalized and we have the correct resolution for
+ // IDR; update first packet to have same resolution as IDR.
+ buffer_[first_packet_index]->video_header.width = idr_width;
+ buffer_[first_packet_index]->video_header.height = idr_height;
+ }
+ } else {
+ buffer_[first_packet_index]->video_header.frame_type =
+ VideoFrameType::kVideoFrameDelta;
+ }
+
+ // If this is not a keyframe, make sure there are no gaps in the packet
+ // sequence numbers up until this point.
+ if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) !=
+ missing_packets_.begin()) {
+ return found_frames;
+ }
+ }
+
+ if (is_h264 || full_frame_found) {
+ const uint16_t end_seq_num = seq_num + 1;
+      // Use the uint16_t type to handle the sequence number wrap-around case.
+ uint16_t num_packets = end_seq_num - start_seq_num;
+ found_frames.reserve(found_frames.size() + num_packets);
+ for (uint16_t i = start_seq_num; i != end_seq_num; ++i) {
+ std::unique_ptr<Packet>& packet = buffer_[i % buffer_.size()];
+ RTC_DCHECK(packet);
+ RTC_DCHECK_EQ(i, packet->seq_num);
+ // Ensure frame boundary flags are properly set.
+ packet->video_header.is_first_packet_in_frame = (i == start_seq_num);
+ packet->video_header.is_last_packet_in_frame = (i == seq_num);
+ found_frames.push_back(std::move(packet));
+ }
+
+ missing_packets_.erase(missing_packets_.begin(),
+ missing_packets_.upper_bound(seq_num));
+ received_padding_.erase(received_padding_.lower_bound(start),
+ received_padding_.upper_bound(seq_num));
+ }
+ }
+ ++seq_num;
+ }
+ return found_frames;
+}
+
+void PacketBuffer::UpdateMissingPackets(uint16_t seq_num) {
+ if (!newest_inserted_seq_num_)
+ newest_inserted_seq_num_ = seq_num;
+
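+  // Bound the missing-packet bookkeeping to roughly the most recent
+  // kMaxPaddingAge sequence numbers; older entries are dropped.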
+ const int kMaxPaddingAge = 1000;
+ if (AheadOf(seq_num, *newest_inserted_seq_num_)) {
+ uint16_t old_seq_num = seq_num - kMaxPaddingAge;
+ auto erase_to = missing_packets_.lower_bound(old_seq_num);
+ missing_packets_.erase(missing_packets_.begin(), erase_to);
+
+    // Guard against inserting a large number of missing packets if there is
+    // a jump in the sequence number.
+ if (AheadOf(old_seq_num, *newest_inserted_seq_num_))
+ *newest_inserted_seq_num_ = old_seq_num;
+
+ ++*newest_inserted_seq_num_;
+ while (AheadOf(seq_num, *newest_inserted_seq_num_)) {
+ missing_packets_.insert(*newest_inserted_seq_num_);
+ ++*newest_inserted_seq_num_;
+ }
+ } else {
+ missing_packets_.erase(seq_num);
+ }
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer.h b/third_party/libwebrtc/modules/video_coding/packet_buffer.h
new file mode 100644
index 0000000000..47b2ffe199
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_PACKET_BUFFER_H_
+#define MODULES_VIDEO_CODING_PACKET_BUFFER_H_
+
+#include <memory>
+#include <queue>
+#include <set>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "api/rtp_packet_info.h"
+#include "api/units/timestamp.h"
+#include "api/video/encoded_image.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class PacketBuffer {
+ public:
+ struct Packet {
+ Packet() = default;
+ Packet(const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video_header);
+ Packet(const Packet&) = delete;
+ Packet(Packet&&) = delete;
+ Packet& operator=(const Packet&) = delete;
+ Packet& operator=(Packet&&) = delete;
+ ~Packet() = default;
+
+ VideoCodecType codec() const { return video_header.codec; }
+ int width() const { return video_header.width; }
+ int height() const { return video_header.height; }
+
+ bool is_first_packet_in_frame() const {
+ return video_header.is_first_packet_in_frame;
+ }
+ bool is_last_packet_in_frame() const {
+ return video_header.is_last_packet_in_frame;
+ }
+
+    // True if all of this packet's preceding packets have been inserted into
+    // the packet buffer. Set and used internally by the PacketBuffer.
+ bool continuous = false;
+ bool marker_bit = false;
+ uint8_t payload_type = 0;
+ uint16_t seq_num = 0;
+ uint32_t timestamp = 0;
+ int times_nacked = -1;
+
+ rtc::CopyOnWriteBuffer video_payload;
+ RTPVideoHeader video_header;
+ };
+ struct InsertResult {
+ std::vector<std::unique_ptr<Packet>> packets;
+ // Indicates if the packet buffer was cleared, which means that a key
+ // frame request should be sent.
+ bool buffer_cleared = false;
+ };
+
+ // Both `start_buffer_size` and `max_buffer_size` must be a power of 2.
+ PacketBuffer(size_t start_buffer_size, size_t max_buffer_size);
+ ~PacketBuffer();
+
+ ABSL_MUST_USE_RESULT InsertResult
+ InsertPacket(std::unique_ptr<Packet> packet);
+ ABSL_MUST_USE_RESULT InsertResult InsertPadding(uint16_t seq_num);
+
+  // Clear all packets older than `seq_num`. Returns the number of packets
+  // cleared.
+ uint32_t ClearTo(uint16_t seq_num);
+ void Clear();
+
+ void ForceSpsPpsIdrIsH264Keyframe();
+ void ResetSpsPpsIdrIsH264Keyframe();
+
+ private:
+ void ClearInternal();
+
+ // Tries to expand the buffer.
+ bool ExpandBufferSize();
+
+  // Test if all previous packets have arrived for the given sequence number.
+ bool PotentialNewFrame(uint16_t seq_num) const;
+
+  // Test if all packets of a frame have arrived, and if so, return the
+  // packets needed to create frames.
+ std::vector<std::unique_ptr<Packet>> FindFrames(uint16_t seq_num);
+
+ void UpdateMissingPackets(uint16_t seq_num);
+
+  // buffer_.size() and max_size_ must always be powers of two.
+ const size_t max_size_;
+
+  // The first sequence number currently in the buffer.
+ uint16_t first_seq_num_;
+
+ // If the packet buffer has received its first packet.
+ bool first_packet_received_;
+
+ // If the buffer is cleared to `first_seq_num_`.
+ bool is_cleared_to_first_seq_num_;
+
+  // Buffer that holds the inserted packets and the information needed to
+  // determine continuity between them.
+ std::vector<std::unique_ptr<Packet>> buffer_;
+
+ absl::optional<uint16_t> newest_inserted_seq_num_;
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> missing_packets_;
+
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> received_padding_;
+
+ // Indicates if we should require SPS, PPS, and IDR for a particular
+ // RTP timestamp to treat the corresponding frame as a keyframe.
+ bool sps_pps_idr_is_h264_keyframe_;
+};
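+
+// Usage sketch (illustrative only; `RequestKeyFrame` and
+// `OnAssembledPackets` stand in for caller-provided handling):
+//
+//   video_coding::PacketBuffer buffer(/*start_buffer_size=*/512,
+//                                     /*max_buffer_size=*/2048);
+//   auto result = buffer.InsertPacket(std::move(packet));
+//   if (result.buffer_cleared)
+//     RequestKeyFrame();
+//   else if (!result.packets.empty())
+//     OnAssembledPackets(std::move(result.packets));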
+
+} // namespace video_coding
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build b/third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build
new file mode 100644
index 0000000000..9561cb88e8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/packet_buffer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("packet_buffer_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc
new file mode 100644
index 0000000000..b147977ab6
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/packet_buffer_unittest.cc
@@ -0,0 +1,828 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/packet_buffer.h"
+
+#include <cstring>
+#include <limits>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "api/array_view.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/video_coding/frame_object.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/random.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Matches;
+using ::testing::Pointee;
+using ::testing::SizeIs;
+
+constexpr int kStartSize = 16;
+constexpr int kMaxSize = 64;
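+// Both are powers of two, as PacketBuffer requires.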
+
+void IgnoreResult(PacketBuffer::InsertResult /*result*/) {}
+
+// Validates that frame boundaries are consistent and returns the first
+// sequence number of each frame.
+std::vector<uint16_t> StartSeqNums(
+ rtc::ArrayView<const std::unique_ptr<PacketBuffer::Packet>> packets) {
+ std::vector<uint16_t> result;
+ bool frame_boundary = true;
+ for (const auto& packet : packets) {
+ EXPECT_EQ(frame_boundary, packet->is_first_packet_in_frame());
+ if (packet->is_first_packet_in_frame()) {
+ result.push_back(packet->seq_num);
+ }
+ frame_boundary = packet->is_last_packet_in_frame();
+ }
+ EXPECT_TRUE(frame_boundary);
+ return result;
+}
+
+MATCHER_P(StartSeqNumsAre, seq_num, "") {
+ return Matches(ElementsAre(seq_num))(StartSeqNums(arg.packets));
+}
+
+MATCHER_P2(StartSeqNumsAre, seq_num1, seq_num2, "") {
+ return Matches(ElementsAre(seq_num1, seq_num2))(StartSeqNums(arg.packets));
+}
+
+MATCHER(KeyFrame, "") {
+ return arg->is_first_packet_in_frame() &&
+ arg->video_header.frame_type == VideoFrameType::kVideoFrameKey;
+}
+
+MATCHER(DeltaFrame, "") {
+ return arg->is_first_packet_in_frame() &&
+ arg->video_header.frame_type == VideoFrameType::kVideoFrameDelta;
+}
+
+struct PacketBufferInsertResult : public PacketBuffer::InsertResult {
+ explicit PacketBufferInsertResult(PacketBuffer::InsertResult result)
+ : InsertResult(std::move(result)) {}
+};
+
+void PrintTo(const PacketBufferInsertResult& result, std::ostream* os) {
+ *os << "frames: { ";
+ for (const auto& packet : result.packets) {
+ if (packet->is_first_packet_in_frame() &&
+ packet->is_last_packet_in_frame()) {
+ *os << "{sn: " << packet->seq_num << " }";
+ } else if (packet->is_first_packet_in_frame()) {
+ *os << "{sn: [" << packet->seq_num << "-";
+ } else if (packet->is_last_packet_in_frame()) {
+ *os << packet->seq_num << "] }, ";
+ }
+ }
+ *os << " }";
+ if (result.buffer_cleared) {
+ *os << ", buffer_cleared";
+ }
+}
+
+class PacketBufferTest : public ::testing::Test {
+ protected:
+ PacketBufferTest() : rand_(0x7732213), packet_buffer_(kStartSize, kMaxSize) {}
+
+ uint16_t Rand() { return rand_.Rand<uint16_t>(); }
+
+ enum IsKeyFrame { kKeyFrame, kDeltaFrame };
+ enum IsFirst { kFirst, kNotFirst };
+ enum IsLast { kLast, kNotLast };
+
+ PacketBufferInsertResult Insert(uint16_t seq_num, // packet sequence number
+ IsKeyFrame keyframe, // is keyframe
+ IsFirst first, // is first packet of frame
+ IsLast last, // is last packet of frame
+ rtc::ArrayView<const uint8_t> data = {},
+ uint32_t timestamp = 123u) { // rtp timestamp
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecGeneric;
+ packet->timestamp = timestamp;
+ packet->seq_num = seq_num;
+ packet->video_header.frame_type = keyframe == kKeyFrame
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ packet->video_header.is_first_packet_in_frame = first == kFirst;
+ packet->video_header.is_last_packet_in_frame = last == kLast;
+ packet->video_payload.SetData(data.data(), data.size());
+
+ return PacketBufferInsertResult(
+ packet_buffer_.InsertPacket(std::move(packet)));
+ }
+
+ Random rand_;
+ PacketBuffer packet_buffer_;
+};
+
+TEST_F(PacketBufferTest, InsertOnePacket) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, InsertMultiplePackets) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(seq_num + 2, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(seq_num + 3, kKeyFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, InsertDuplicatePacket) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kNotFirst, kLast).packets,
+ SizeIs(2));
+}
+
+TEST_F(PacketBufferTest, SeqNumWrapOneFrame) {
+ Insert(0xFFFF, kKeyFrame, kFirst, kNotLast);
+ EXPECT_THAT(Insert(0x0, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(0xFFFF));
+}
+
+TEST_F(PacketBufferTest, SeqNumWrapTwoFrames) {
+ EXPECT_THAT(Insert(0xFFFF, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(0xFFFF));
+ EXPECT_THAT(Insert(0x0, kKeyFrame, kFirst, kLast), StartSeqNumsAre(0x0));
+}
+
+TEST_F(PacketBufferTest, InsertOldPackets) {
+ EXPECT_THAT(Insert(100, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(102, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(101, kKeyFrame, kNotFirst, kLast).packets, SizeIs(2));
+
+ EXPECT_THAT(Insert(100, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(100, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(102, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+
+ packet_buffer_.ClearTo(102);
+ EXPECT_THAT(Insert(102, kDeltaFrame, kFirst, kLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(103, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, FrameSize) {
+ const uint16_t seq_num = Rand();
+ uint8_t data1[5] = {};
+ uint8_t data2[5] = {};
+ uint8_t data3[5] = {};
+ uint8_t data4[5] = {};
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast, data1);
+ Insert(seq_num + 1, kKeyFrame, kNotFirst, kNotLast, data2);
+ Insert(seq_num + 2, kKeyFrame, kNotFirst, kNotLast, data3);
+ auto packets =
+ Insert(seq_num + 3, kKeyFrame, kNotFirst, kLast, data4).packets;
+ // Expect one frame of 4 packets.
+ EXPECT_THAT(StartSeqNums(packets), ElementsAre(seq_num));
+ EXPECT_THAT(packets, SizeIs(4));
+}
+
+TEST_F(PacketBufferTest, ExpandBuffer) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ for (int i = 1; i < kStartSize; ++i)
+ EXPECT_FALSE(
+ Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast).buffer_cleared);
+
+  // Having already inserted kStartSize packets, inserting the last packet
+  // should increase the buffer size and also result in an assembled frame.
+ EXPECT_FALSE(
+ Insert(seq_num + kStartSize, kKeyFrame, kNotFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, SingleFrameExpandsBuffer) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ for (int i = 1; i < kStartSize; ++i)
+ Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + kStartSize, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, ExpandBufferOverflow) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_FALSE(Insert(seq_num, kKeyFrame, kFirst, kNotLast).buffer_cleared);
+ for (int i = 1; i < kMaxSize; ++i)
+ EXPECT_FALSE(
+ Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast).buffer_cleared);
+
+  // Having already inserted kMaxSize packets, inserting one more should
+  // overflow the buffer and result in `buffer_cleared` being set.
+ EXPECT_TRUE(
+ Insert(seq_num + kMaxSize, kKeyFrame, kNotFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, OnePacketOneFrame) {
+ const uint16_t seq_num = Rand();
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, TwoPacketsTwoFrames) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 1));
+}
+
+TEST_F(PacketBufferTest, TwoPacketsOneFrames) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, ThreePacketReorderingOneFrame) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kNotLast).packets, IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 2, kKeyFrame, kNotFirst, kLast).packets,
+ IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kKeyFrame, kNotFirst, kNotLast),
+ StartSeqNumsAre(seq_num));
+}
+
+TEST_F(PacketBufferTest, Frames) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num + 1, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 1));
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 2));
+ EXPECT_THAT(Insert(seq_num + 3, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 3));
+}
+
+TEST_F(PacketBufferTest, ClearSinglePacket) {
+ const uint16_t seq_num = Rand();
+
+ for (int i = 0; i < kMaxSize; ++i)
+ Insert(seq_num + i, kDeltaFrame, kFirst, kLast);
+
+ packet_buffer_.ClearTo(seq_num);
+ EXPECT_FALSE(
+ Insert(seq_num + kMaxSize, kDeltaFrame, kFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, ClearPacketBeforeFullyReceivedFrame) {
+ Insert(0, kKeyFrame, kFirst, kNotLast);
+ Insert(1, kKeyFrame, kNotFirst, kNotLast);
+ packet_buffer_.ClearTo(0);
+ EXPECT_THAT(Insert(2, kKeyFrame, kNotFirst, kLast).packets, IsEmpty());
+}
+
+TEST_F(PacketBufferTest, ClearFullBuffer) {
+ for (int i = 0; i < kMaxSize; ++i)
+ Insert(i, kDeltaFrame, kFirst, kLast);
+
+ packet_buffer_.ClearTo(kMaxSize - 1);
+
+ for (int i = kMaxSize; i < 2 * kMaxSize; ++i)
+ EXPECT_FALSE(Insert(i, kDeltaFrame, kFirst, kLast).buffer_cleared);
+}
+
+TEST_F(PacketBufferTest, DontClearNewerPacket) {
+ EXPECT_THAT(Insert(0, kKeyFrame, kFirst, kLast), StartSeqNumsAre(0));
+ packet_buffer_.ClearTo(0);
+ EXPECT_THAT(Insert(2 * kStartSize, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(2 * kStartSize));
+ EXPECT_THAT(Insert(3 * kStartSize + 1, kKeyFrame, kFirst, kNotLast).packets,
+ IsEmpty());
+ packet_buffer_.ClearTo(2 * kStartSize);
+ EXPECT_THAT(Insert(3 * kStartSize + 2, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(3 * kStartSize + 1));
+}
+
+TEST_F(PacketBufferTest, OneIncompleteFrame) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num, kDeltaFrame, kFirst, kNotLast).packets,
+ IsEmpty());
+ EXPECT_THAT(Insert(seq_num + 1, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num - 1, kDeltaFrame, kNotFirst, kLast).packets,
+ IsEmpty());
+}
+
+TEST_F(PacketBufferTest, TwoIncompleteFramesFullBuffer) {
+ const uint16_t seq_num = Rand();
+
+ for (int i = 1; i < kMaxSize - 1; ++i)
+ Insert(seq_num + i, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num, kDeltaFrame, kFirst, kNotLast).packets,
+ IsEmpty());
+ EXPECT_THAT(Insert(seq_num - 1, kDeltaFrame, kNotFirst, kLast).packets,
+ IsEmpty());
+}
+
+TEST_F(PacketBufferTest, FramesReordered) {
+ const uint16_t seq_num = Rand();
+
+ EXPECT_THAT(Insert(seq_num + 1, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 1));
+ EXPECT_THAT(Insert(seq_num, kKeyFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num));
+ EXPECT_THAT(Insert(seq_num + 3, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 3));
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kFirst, kLast),
+ StartSeqNumsAre(seq_num + 2));
+}
+
+TEST_F(PacketBufferTest, InsertPacketAfterSequenceNumberWrapAround) {
+ uint16_t kFirstSeqNum = 0;
+ uint32_t kTimestampDelta = 100;
+ uint32_t timestamp = 10000;
+ uint16_t seq_num = kFirstSeqNum;
+
+ // Loop until seq_num wraps around.
+ SeqNumUnwrapper<uint16_t> unwrapper;
+ while (unwrapper.Unwrap(seq_num) < std::numeric_limits<uint16_t>::max()) {
+ Insert(seq_num++, kKeyFrame, kFirst, kNotLast, {}, timestamp);
+ for (int i = 0; i < 5; ++i) {
+ Insert(seq_num++, kKeyFrame, kNotFirst, kNotLast, {}, timestamp);
+ }
+ Insert(seq_num++, kKeyFrame, kNotFirst, kLast, {}, timestamp);
+ timestamp += kTimestampDelta;
+ }
+
+ // Receive frame with overlapping sequence numbers.
+ Insert(seq_num++, kKeyFrame, kFirst, kNotLast, {}, timestamp);
+ for (int i = 0; i < 5; ++i) {
+ Insert(seq_num++, kKeyFrame, kNotFirst, kNotLast, {}, timestamp);
+ }
+ auto packets =
+ Insert(seq_num++, kKeyFrame, kNotFirst, kLast, {}, timestamp).packets;
+ // One frame of 7 packets.
+ EXPECT_THAT(StartSeqNums(packets), SizeIs(1));
+ EXPECT_THAT(packets, SizeIs(7));
+}
+
+// If `sps_pps_idr_is_keyframe` is true, we require keyframes to contain
+// SPS/PPS/IDR, and the keyframes we create as part of the test do contain
+// SPS/PPS/IDR. If `sps_pps_idr_is_keyframe` is false, we require and create
+// keyframes containing only IDR.
+class PacketBufferH264Test : public PacketBufferTest {
+ protected:
+ explicit PacketBufferH264Test(bool sps_pps_idr_is_keyframe)
+ : PacketBufferTest(), sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe) {
+ if (sps_pps_idr_is_keyframe) {
+ packet_buffer_.ForceSpsPpsIdrIsH264Keyframe();
+ }
+ }
+
+ PacketBufferInsertResult InsertH264(
+ uint16_t seq_num, // packet sequence number
+ IsKeyFrame keyframe, // is keyframe
+ IsFirst first, // is first packet of frame
+ IsLast last, // is last packet of frame
+ uint32_t timestamp, // rtp timestamp
+ rtc::ArrayView<const uint8_t> data = {},
+ uint32_t width = 0, // width of frame (SPS/IDR)
+ uint32_t height = 0) { // height of frame (SPS/IDR)
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ packet->seq_num = seq_num;
+ packet->timestamp = timestamp;
+ if (keyframe == kKeyFrame) {
+ if (sps_pps_idr_is_keyframe_) {
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[2].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 3;
+ } else {
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 1;
+ }
+ }
+ packet->video_header.width = width;
+ packet->video_header.height = height;
+ packet->video_header.is_first_packet_in_frame = first == kFirst;
+ packet->video_header.is_last_packet_in_frame = last == kLast;
+ packet->video_payload.SetData(data.data(), data.size());
+
+ return PacketBufferInsertResult(
+ packet_buffer_.InsertPacket(std::move(packet)));
+ }
+
+ PacketBufferInsertResult InsertH264KeyFrameWithAud(
+ uint16_t seq_num, // packet sequence number
+ IsKeyFrame keyframe, // is keyframe
+ IsFirst first, // is first packet of frame
+ IsLast last, // is last packet of frame
+ uint32_t timestamp, // rtp timestamp
+ rtc::ArrayView<const uint8_t> data = {},
+ uint32_t width = 0, // width of frame (SPS/IDR)
+ uint32_t height = 0) { // height of frame (SPS/IDR)
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ packet->seq_num = seq_num;
+ packet->timestamp = timestamp;
+
+    // This should be the start of a frame.
+ RTC_CHECK(first == kFirst);
+
+    // Insert an AUD NALU / packet without width/height.
+ h264_header.nalus[0].type = H264::NaluType::kAud;
+ h264_header.nalus_length = 1;
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = false;
+ IgnoreResult(packet_buffer_.InsertPacket(std::move(packet)));
+    // Insert the IDR.
+ return InsertH264(seq_num + 1, keyframe, kNotFirst, last, timestamp, data,
+ width, height);
+ }
+
+ const bool sps_pps_idr_is_keyframe_;
+};
+
+// This fixture is used to test the general behaviour of the packet buffer
+// in both configurations.
+class PacketBufferH264ParameterizedTest
+ : public ::testing::WithParamInterface<bool>,
+ public PacketBufferH264Test {
+ protected:
+ PacketBufferH264ParameterizedTest() : PacketBufferH264Test(GetParam()) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
+ PacketBufferH264ParameterizedTest,
+ ::testing::Bool());
+
+TEST_P(PacketBufferH264ParameterizedTest, DontRemoveMissingPacketOnClearTo) {
+ InsertH264(0, kKeyFrame, kFirst, kLast, 0);
+ InsertH264(2, kDeltaFrame, kFirst, kNotLast, 2);
+ packet_buffer_.ClearTo(0);
+  // Expect no frame because packet #1 is missing.
+ EXPECT_THAT(InsertH264(3, kDeltaFrame, kNotFirst, kLast, 2).packets,
+ IsEmpty());
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamOneFrameFullBuffer) {
+ uint8_t data_arr[kStartSize][1];
+ uint8_t expected[kStartSize];
+
+ for (uint8_t i = 0; i < kStartSize; ++i) {
+ data_arr[i][0] = i;
+ expected[i] = i;
+ }
+
+ InsertH264(0, kKeyFrame, kFirst, kNotLast, 1, data_arr[0]);
+ for (uint8_t i = 1; i < kStartSize - 1; ++i) {
+ InsertH264(i, kKeyFrame, kNotFirst, kNotLast, 1, data_arr[i]);
+ }
+
+ auto packets = InsertH264(kStartSize - 1, kKeyFrame, kNotFirst, kLast, 1,
+ data_arr[kStartSize - 1])
+ .packets;
+ ASSERT_THAT(StartSeqNums(packets), ElementsAre(0));
+ EXPECT_THAT(packets, SizeIs(kStartSize));
+ for (size_t i = 0; i < packets.size(); ++i) {
+ EXPECT_THAT(packets[i]->video_payload, SizeIs(1)) << "Packet #" << i;
+ }
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamBufferPadding) {
+ uint16_t seq_num = Rand();
+ rtc::CopyOnWriteBuffer data = "some plain old data";
+
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = 1;
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.packetization_type = kH264SingleNalu;
+ packet->seq_num = seq_num;
+ packet->video_header.codec = kVideoCodecH264;
+ packet->video_payload = data;
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ auto frames = packet_buffer_.InsertPacket(std::move(packet)).packets;
+
+ ASSERT_THAT(frames, SizeIs(1));
+ EXPECT_EQ(frames[0]->seq_num, seq_num);
+ EXPECT_EQ(frames[0]->video_payload, data);
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FrameResolution) {
+ uint16_t seq_num = 100;
+ uint8_t data[] = "some plain old data";
+ uint32_t width = 640;
+ uint32_t height = 360;
+ uint32_t timestamp = 1000;
+
+ auto packets = InsertH264(seq_num, kKeyFrame, kFirst, kLast, timestamp, data,
+ width, height)
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(1));
+ EXPECT_EQ(packets[0]->video_header.width, width);
+ EXPECT_EQ(packets[0]->video_header.height, height);
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FrameResolutionNaluBeforeSPS) {
+ uint16_t seq_num = 100;
+ uint8_t data[] = "some plain old data";
+ uint32_t width = 640;
+ uint32_t height = 360;
+ uint32_t timestamp = 1000;
+
+ auto packets = InsertH264KeyFrameWithAud(seq_num, kKeyFrame, kFirst, kLast,
+ timestamp, data, width, height)
+ .packets;
+
+ ASSERT_THAT(StartSeqNums(packets), ElementsAre(seq_num));
+ EXPECT_EQ(packets[0]->video_header.width, width);
+ EXPECT_EQ(packets[0]->video_header.height, height);
+}
+
+TEST_F(PacketBufferTest, FreeSlotsOnFrameCreation) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ Insert(seq_num + 1, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+
+ // Insert frame that fills the whole buffer.
+ Insert(seq_num + 3, kKeyFrame, kFirst, kNotLast);
+ for (int i = 0; i < kMaxSize - 2; ++i)
+ Insert(seq_num + i + 4, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + kMaxSize + 2, kKeyFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num + 3));
+}
+
+TEST_F(PacketBufferTest, Clear) {
+ const uint16_t seq_num = Rand();
+
+ Insert(seq_num, kKeyFrame, kFirst, kNotLast);
+ Insert(seq_num + 1, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + 2, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num));
+
+ packet_buffer_.Clear();
+
+ Insert(seq_num + kStartSize, kKeyFrame, kFirst, kNotLast);
+ Insert(seq_num + kStartSize + 1, kDeltaFrame, kNotFirst, kNotLast);
+ EXPECT_THAT(Insert(seq_num + kStartSize + 2, kDeltaFrame, kNotFirst, kLast),
+ StartSeqNumsAre(seq_num + kStartSize));
+}
+
+TEST_F(PacketBufferTest, FramesAfterClear) {
+ Insert(9025, kDeltaFrame, kFirst, kLast);
+ Insert(9024, kKeyFrame, kFirst, kLast);
+ packet_buffer_.ClearTo(9025);
+ EXPECT_THAT(Insert(9057, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+ EXPECT_THAT(Insert(9026, kDeltaFrame, kFirst, kLast).packets, SizeIs(1));
+}
+
+TEST_F(PacketBufferTest, SameFrameDifferentTimestamps) {
+ Insert(0, kKeyFrame, kFirst, kNotLast, {}, 1000);
+ EXPECT_THAT(Insert(1, kKeyFrame, kNotFirst, kLast, {}, 1001).packets,
+ IsEmpty());
+}
+
+TEST_F(PacketBufferTest, ContinuousSeqNumDoubleMarkerBit) {
+ Insert(2, kKeyFrame, kNotFirst, kNotLast);
+ Insert(1, kKeyFrame, kFirst, kLast);
+ EXPECT_THAT(Insert(3, kKeyFrame, kNotFirst, kLast).packets, IsEmpty());
+}
+
+TEST_F(PacketBufferTest, IncomingCodecChange) {
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ packet->video_header.codec = kVideoCodecVP8;
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ packet->timestamp = 1;
+ packet->seq_num = 1;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ SizeIs(1));
+
+ packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ packet->video_header.codec = kVideoCodecH264;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = 1;
+ packet->timestamp = 3;
+ packet->seq_num = 3;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ IsEmpty());
+
+ packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ packet->video_header.codec = kVideoCodecVP8;
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ packet->timestamp = 2;
+ packet->seq_num = 2;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ SizeIs(2));
+}
+
+TEST_F(PacketBufferTest, TooManyNalusInPacket) {
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ packet->timestamp = 1;
+ packet->seq_num = 1;
+ packet->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus_length = kMaxNalusPerPacket;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ IsEmpty());
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, OneFrameFillBuffer) {
+ InsertH264(0, kKeyFrame, kFirst, kNotLast, 1000);
+ for (int i = 1; i < kStartSize - 1; ++i)
+ InsertH264(i, kKeyFrame, kNotFirst, kNotLast, 1000);
+ EXPECT_THAT(InsertH264(kStartSize - 1, kKeyFrame, kNotFirst, kLast, 1000),
+ StartSeqNumsAre(0));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, CreateFramesAfterFilledBuffer) {
+ EXPECT_THAT(InsertH264(kStartSize - 2, kKeyFrame, kFirst, kLast, 0).packets,
+ SizeIs(1));
+
+ InsertH264(kStartSize, kDeltaFrame, kFirst, kNotLast, 2000);
+ for (int i = 1; i < kStartSize; ++i)
+ InsertH264(kStartSize + i, kDeltaFrame, kNotFirst, kNotLast, 2000);
+ EXPECT_THAT(
+ InsertH264(kStartSize + kStartSize, kDeltaFrame, kNotFirst, kLast, 2000)
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(InsertH264(kStartSize - 1, kKeyFrame, kFirst, kLast, 1000),
+ StartSeqNumsAre(kStartSize - 1, kStartSize));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, OneFrameMaxSeqNum) {
+ InsertH264(65534, kKeyFrame, kFirst, kNotLast, 1000);
+ EXPECT_THAT(InsertH264(65535, kKeyFrame, kNotFirst, kLast, 1000),
+ StartSeqNumsAre(65534));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, ClearMissingPacketsOnKeyframe) {
+ InsertH264(0, kKeyFrame, kFirst, kLast, 1000);
+ InsertH264(2, kKeyFrame, kFirst, kLast, 3000);
+ InsertH264(3, kDeltaFrame, kFirst, kNotLast, 4000);
+ InsertH264(4, kDeltaFrame, kNotFirst, kLast, 4000);
+
+ EXPECT_THAT(InsertH264(kStartSize + 1, kKeyFrame, kFirst, kLast, 18000),
+ StartSeqNumsAre(kStartSize + 1));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FindFramesOnPadding) {
+ EXPECT_THAT(InsertH264(0, kKeyFrame, kFirst, kLast, 1000),
+ StartSeqNumsAre(0));
+ EXPECT_THAT(InsertH264(2, kDeltaFrame, kFirst, kLast, 1000).packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer_.InsertPadding(1), StartSeqNumsAre(2));
+}
+
+TEST_P(PacketBufferH264ParameterizedTest, FindFramesOnReorderedPadding) {
+ EXPECT_THAT(InsertH264(0, kKeyFrame, kFirst, kLast, 1001),
+ StartSeqNumsAre(0));
+ EXPECT_THAT(InsertH264(1, kDeltaFrame, kFirst, kNotLast, 1002).packets,
+ IsEmpty());
+ EXPECT_THAT(packet_buffer_.InsertPadding(3).packets, IsEmpty());
+ EXPECT_THAT(InsertH264(4, kDeltaFrame, kFirst, kLast, 1003).packets,
+ IsEmpty());
+ EXPECT_THAT(InsertH264(2, kDeltaFrame, kNotFirst, kLast, 1002),
+ StartSeqNumsAre(1, 4));
+}
+
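+// Exercises both keyframe policies: with sps_pps_idr_is_keyframe set, a frame
+// counts as a key frame only if it carries SPS and PPS NALUs in addition to
+// the IDR; otherwise an IDR alone is enough.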
+class PacketBufferH264XIsKeyframeTest : public PacketBufferH264Test {
+ protected:
+ const uint16_t kSeqNum = 5;
+
+ explicit PacketBufferH264XIsKeyframeTest(bool sps_pps_idr_is_keyframe)
+ : PacketBufferH264Test(sps_pps_idr_is_keyframe) {}
+
+ std::unique_ptr<PacketBuffer::Packet> CreatePacket() {
+ auto packet = std::make_unique<PacketBuffer::Packet>();
+ packet->video_header.codec = kVideoCodecH264;
+ packet->seq_num = kSeqNum;
+
+ packet->video_header.is_first_packet_in_frame = true;
+ packet->video_header.is_last_packet_in_frame = true;
+ return packet;
+ }
+};
+
+class PacketBufferH264IdrIsKeyframeTest
+ : public PacketBufferH264XIsKeyframeTest {
+ protected:
+ PacketBufferH264IdrIsKeyframeTest()
+ : PacketBufferH264XIsKeyframeTest(false) {}
+};
+
+TEST_F(PacketBufferH264IdrIsKeyframeTest, IdrIsKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 1;
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(KeyFrame()));
+}
+
+TEST_F(PacketBufferH264IdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[2].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 3;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(KeyFrame()));
+}
+
+class PacketBufferH264SpsPpsIdrIsKeyframeTest
+ : public PacketBufferH264XIsKeyframeTest {
+ protected:
+ PacketBufferH264SpsPpsIdrIsKeyframeTest()
+ : PacketBufferH264XIsKeyframeTest(true) {}
+};
+
+TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, IdrIsNotKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 1;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(DeltaFrame()));
+}
+
+TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIsNotKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus_length = 2;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(DeltaFrame()));
+}
+
+TEST_F(PacketBufferH264SpsPpsIdrIsKeyframeTest, SpsPpsIdrIsKeyframe) {
+ auto packet = CreatePacket();
+ auto& h264_header =
+ packet->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.nalus[0].type = H264::NaluType::kSps;
+ h264_header.nalus[1].type = H264::NaluType::kPps;
+ h264_header.nalus[2].type = H264::NaluType::kIdr;
+ h264_header.nalus_length = 3;
+
+ EXPECT_THAT(packet_buffer_.InsertPacket(std::move(packet)).packets,
+ ElementsAre(KeyFrame()));
+}
+
+} // namespace
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/receiver.cc b/third_party/libwebrtc/modules/video_coding/receiver.cc
new file mode 100644
index 0000000000..3f954ec9bf
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/receiver.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/receiver.h"
+
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "api/video/encoded_image.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+enum { kMaxReceiverDelayMs = 10000 };
+
+VCMReceiver::VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials)
+ : VCMReceiver::VCMReceiver(timing,
+ clock,
+ absl::WrapUnique(EventWrapper::Create()),
+ absl::WrapUnique(EventWrapper::Create()),
+ field_trials) {}
+
+VCMReceiver::VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ std::unique_ptr<EventWrapper> receiver_event,
+ std::unique_ptr<EventWrapper> jitter_buffer_event,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ jitter_buffer_(clock_, std::move(jitter_buffer_event), field_trials),
+ timing_(timing),
+ render_wait_event_(std::move(receiver_event)),
+ max_video_delay_ms_(kMaxVideoDelayMs) {
+ jitter_buffer_.Start();
+}
+
+VCMReceiver::~VCMReceiver() {
+ render_wait_event_->Set();
+}
+
+int32_t VCMReceiver::InsertPacket(const VCMPacket& packet) {
+  // Insert the packet into the jitter buffer. The packet can either be empty
+  // or contain media at this point.
+ bool retransmitted = false;
+ const VCMFrameBufferEnum ret =
+ jitter_buffer_.InsertPacket(packet, &retransmitted);
+ if (ret == kOldPacket) {
+ return VCM_OK;
+ } else if (ret == kFlushIndicator) {
+ return VCM_FLUSH_INDICATOR;
+ } else if (ret < 0) {
+ return VCM_JITTER_BUFFER_ERROR;
+ }
+ if (ret == kCompleteSession && !retransmitted) {
+ // We don't want to include timestamps which have suffered from
+ // retransmission here, since we compensate with extra retransmission
+ // delay within the jitter estimate.
+ timing_->IncomingTimestamp(packet.timestamp, clock_->CurrentTime());
+ }
+ return VCM_OK;
+}
+
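+// Waits up to `max_wait_time_ms` for a complete frame, feeds the frame's
+// playout-delay limits and the current jitter estimate into the timing
+// module, and computes the frame's render time. Returns nullptr on timeout,
+// or when the render timing looks broken, in which case the jitter buffer
+// and the timing state are reset first.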
+VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
+ bool prefer_late_decoding) {
+ const int64_t start_time_ms = clock_->TimeInMilliseconds();
+ uint32_t frame_timestamp = 0;
+ int min_playout_delay_ms = -1;
+ int max_playout_delay_ms = -1;
+ int64_t render_time_ms = 0;
+ // Exhaust wait time to get a complete frame for decoding.
+ VCMEncodedFrame* found_frame =
+ jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
+
+ if (found_frame) {
+ frame_timestamp = found_frame->Timestamp();
+ min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
+ max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
+ } else {
+ return nullptr;
+ }
+
+ if (min_playout_delay_ms >= 0)
+ timing_->set_min_playout_delay(TimeDelta::Millis(min_playout_delay_ms));
+
+ if (max_playout_delay_ms >= 0)
+ timing_->set_max_playout_delay(TimeDelta::Millis(max_playout_delay_ms));
+
+  // We have a frame; set the timing and the render timestamp.
+ timing_->SetJitterDelay(
+ TimeDelta::Millis(jitter_buffer_.EstimatedJitterMs()));
+ const Timestamp now = clock_->CurrentTime();
+ const int64_t now_ms = now.ms();
+ timing_->UpdateCurrentDelay(frame_timestamp);
+ render_time_ms = timing_->RenderTime(frame_timestamp, now).ms();
+ // Check render timing.
+ bool timing_error = false;
+ // Assume that render timing errors are due to changes in the video stream.
+ if (render_time_ms < 0) {
+ timing_error = true;
+ } else if (std::abs(render_time_ms - now_ms) > max_video_delay_ms_) {
+ int frame_delay = static_cast<int>(std::abs(render_time_ms - now_ms));
+ RTC_LOG(LS_WARNING)
+ << "A frame about to be decoded is out of the configured "
+ "delay bounds ("
+ << frame_delay << " > " << max_video_delay_ms_
+ << "). Resetting the video jitter buffer.";
+ timing_error = true;
+ } else if (static_cast<int>(timing_->TargetVideoDelay().ms()) >
+ max_video_delay_ms_) {
+ RTC_LOG(LS_WARNING) << "The video target delay has grown larger than "
+ << max_video_delay_ms_
+ << " ms. Resetting jitter buffer.";
+ timing_error = true;
+ }
+
+ if (timing_error) {
+ // Timing error => reset timing and flush the jitter buffer.
+ jitter_buffer_.Flush();
+ timing_->Reset();
+    return nullptr;
+ }
+
+ if (prefer_late_decoding) {
+ // Decode frame as close as possible to the render timestamp.
+ const int32_t available_wait_time =
+ max_wait_time_ms -
+ static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
+ uint16_t new_max_wait_time =
+ static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
+ uint32_t wait_time_ms = rtc::saturated_cast<uint32_t>(
+ timing_
+ ->MaxWaitingTime(Timestamp::Millis(render_time_ms),
+ clock_->CurrentTime(),
+ /*too_many_frames_queued=*/false)
+ .ms());
+    if (new_max_wait_time < wait_time_ms) {
+      // We're not allowed to wait until the frame is supposed to be rendered;
+      // wait as long as we're allowed (to avoid busy looping) and then return
+      // nullptr. The next call to this function might return the frame.
+      render_wait_event_->Wait(new_max_wait_time);
+      return nullptr;
+    }
+ // Wait until it's time to render.
+ render_wait_event_->Wait(wait_time_ms);
+ }
+
+ // Extract the frame from the jitter buffer and set the render time.
+ VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
+  if (frame == nullptr) {
+    return nullptr;
+  }
+ frame->SetRenderTime(render_time_ms);
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
+ "render_time", frame->RenderTimeMs());
+ return frame;
+}
+
+void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
+ jitter_buffer_.ReleaseFrame(frame);
+}
+
+void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ jitter_buffer_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
+}
+
+std::vector<uint16_t> VCMReceiver::NackList(bool* request_key_frame) {
+ return jitter_buffer_.GetNackList(request_key_frame);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/receiver.h b/third_party/libwebrtc/modules/video_coding/receiver.h
new file mode 100644
index 0000000000..069f8c55c7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/receiver.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RECEIVER_H_
+#define MODULES_VIDEO_CODING_RECEIVER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "modules/video_coding/event_wrapper.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/jitter_buffer.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/timing/timing.h"
+
+namespace webrtc {
+
+class Clock;
+class VCMEncodedFrame;
+
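+// Receive side of the legacy video coding module: packets are pushed into the
+// jitter buffer with InsertPacket() and complete frames are pulled out with
+// FrameForDecoding()/ReleaseFrame(). A minimal usage sketch (the decode step
+// is application-defined and only a hypothetical placeholder here):
+//
+//   VCMReceiver receiver(&timing, clock, field_trials);
+//   receiver.InsertPacket(packet);
+//   if (VCMEncodedFrame* frame =
+//           receiver.FrameForDecoding(/*max_wait_time_ms=*/30,
+//                                     /*prefer_late_decoding=*/false)) {
+//     DecodeFrame(frame);  // Hypothetical application hook.
+//     receiver.ReleaseFrame(frame);
+//   }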
+class VCMReceiver {
+ public:
+ VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ const FieldTrialsView& field_trials);
+
+  // This constructor lets you specify a different event implementation for
+  // the jitter buffer. Useful for unit tests that simulate incoming packets,
+  // in which case the jitter buffer's wait event must be different from that
+  // of VCMReceiver itself.
+ VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ std::unique_ptr<EventWrapper> receiver_event,
+ std::unique_ptr<EventWrapper> jitter_buffer_event,
+ const FieldTrialsView& field_trials);
+
+ ~VCMReceiver();
+
+ int32_t InsertPacket(const VCMPacket& packet);
+ VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
+ bool prefer_late_decoding);
+ void ReleaseFrame(VCMEncodedFrame* frame);
+
+ // NACK.
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+ std::vector<uint16_t> NackList(bool* request_key_frame);
+
+ private:
+ Clock* const clock_;
+ VCMJitterBuffer jitter_buffer_;
+ VCMTiming* timing_;
+ std::unique_ptr<EventWrapper> render_wait_event_;
+ int max_video_delay_ms_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RECEIVER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/receiver_unittest.cc b/third_party/libwebrtc/modules/video_coding/receiver_unittest.cc
new file mode 100644
index 0000000000..2beb97e972
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/receiver_unittest.cc
@@ -0,0 +1,493 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/receiver.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/test/stream_generator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+class TestVCMReceiver : public ::testing::Test {
+ protected:
+ TestVCMReceiver()
+ : clock_(0),
+ timing_(&clock_, field_trials_),
+ receiver_(&timing_, &clock_, field_trials_),
+ stream_generator_(0, clock_.TimeInMilliseconds()) {}
+
+ int32_t InsertPacket(int index) {
+ VCMPacket packet;
+ bool packet_available = stream_generator_.GetPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ return receiver_.InsertPacket(packet);
+ }
+
+ int32_t InsertPacketAndPop(int index) {
+ VCMPacket packet;
+ bool packet_available = stream_generator_.PopPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ return receiver_.InsertPacket(packet);
+ }
+
+ int32_t InsertFrame(VideoFrameType frame_type, bool complete) {
+ int num_of_packets = complete ? 1 : 2;
+ stream_generator_.GenerateFrame(
+ frame_type,
+ (frame_type != VideoFrameType::kEmptyFrame) ? num_of_packets : 0,
+ (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+ clock_.TimeInMilliseconds());
+ int32_t ret = InsertPacketAndPop(0);
+ if (!complete) {
+ // Drop the second packet.
+ VCMPacket packet;
+ stream_generator_.PopPacket(&packet, 0);
+ }
+ clock_.AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ return ret;
+ }
+
+ bool DecodeNextFrame() {
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(0, false);
+ if (!frame)
+ return false;
+ receiver_.ReleaseFrame(frame);
+ return true;
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClock clock_;
+ VCMTiming timing_;
+ VCMReceiver receiver_;
+ StreamGenerator stream_generator_;
+};
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_.AdvanceTimeMilliseconds(kMinDelayMs);
+ EXPECT_TRUE(DecodeNextFrame());
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
+ for (int i = 0; i < kNumFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+  // Insert enough frames to make the non-decodable sequence too long.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ // Advance time until it's time to decode the key frame.
+  clock_.AdvanceTimeMilliseconds(
+      kMinDelayMs - (clock_.TimeInMilliseconds() - key_frame_inserted));
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we get a key frame request.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+  // Insert all but one of the frames needed to trigger a key frame request,
+  // so the non-decodable duration stays just below the limit.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ // Advance time until it's time to decode the key frame.
+  clock_.AdvanceTimeMilliseconds(
+      kMinDelayMs - (clock_.TimeInMilliseconds() - key_frame_inserted));
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we don't get a key frame request since we haven't generated
+ // enough frames.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+  // Insert enough frames to make the non-decodable sequence too long, except
+  // that there are no losses.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+ // Advance time until it's time to decode the key frame.
+  clock_.AdvanceTimeMilliseconds(
+      kMinDelayMs - (clock_.TimeInMilliseconds() - key_frame_inserted));
+ EXPECT_TRUE(DecodeNextFrame());
+  // Make sure we don't get a key frame request, since the non-decodable part
+  // of the sequence is only one frame long.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ timing_.set_min_playout_delay(TimeDelta::Millis(kMinDelayMs));
+ int64_t key_frame_inserted = clock_.TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
+  // Insert enough frames to make the non-decodable sequence too long.
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
+ // Advance time until it's time to decode the key frame.
+  clock_.AdvanceTimeMilliseconds(
+      kMinDelayMs - (clock_.TimeInMilliseconds() - key_frame_inserted));
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we don't get a key frame request since we have a key frame
+ // in the list.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+// A simulated clock that, as time elapses, inserts frames into the jitter
+// buffer according to the timestamps configured via SetFrames().
+class SimulatedClockWithFrames : public SimulatedClock {
+ public:
+ SimulatedClockWithFrames(StreamGenerator* stream_generator,
+ VCMReceiver* receiver)
+ : SimulatedClock(0),
+ stream_generator_(stream_generator),
+ receiver_(receiver) {}
+ virtual ~SimulatedClockWithFrames() {}
+
+  // If `stop_on_frame` is true and the next frame arrives between now and
+  // now + `milliseconds`, the clock is advanced to the arrival time of that
+  // frame. Otherwise, the clock is advanced by `milliseconds`.
+  //
+  // In both cases, a frame is inserted into the jitter buffer at the instant
+  // when the clock time reaches timestamps_.front().arrive_time.
+  //
+  // Returns true if a frame arrives between now and now + `milliseconds`.
+ bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
+ return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
+ }
+
+ bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
+ int64_t start_time = TimeInMicroseconds();
+ int64_t end_time = start_time + microseconds;
+ bool frame_injected = false;
+ while (!timestamps_.empty() &&
+ timestamps_.front().arrive_time <= end_time) {
+ RTC_DCHECK_GE(timestamps_.front().arrive_time, start_time);
+
+ SimulatedClock::AdvanceTimeMicroseconds(timestamps_.front().arrive_time -
+ TimeInMicroseconds());
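+      // Convert the render time from microseconds to milliseconds, rounding
+      // to the nearest millisecond.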
+ GenerateAndInsertFrame((timestamps_.front().render_time + 500) / 1000);
+ timestamps_.pop();
+ frame_injected = true;
+
+ if (stop_on_frame)
+ return frame_injected;
+ }
+
+ if (TimeInMicroseconds() < end_time) {
+ SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
+ }
+ return frame_injected;
+ }
+
+  // Input timestamps are in milliseconds. `arrive_timestamps` must be
+  // positive and in increasing order; they determine when frames are inserted
+  // into the jitter buffer. `render_timestamps` are the timestamps carried on
+  // the frames.
+ void SetFrames(const int64_t* arrive_timestamps,
+ const int64_t* render_timestamps,
+ size_t size) {
+ int64_t previous_arrive_timestamp = 0;
+ for (size_t i = 0; i < size; i++) {
+ RTC_CHECK_GE(arrive_timestamps[i], previous_arrive_timestamp);
+ timestamps_.push(TimestampPair(arrive_timestamps[i] * 1000,
+ render_timestamps[i] * 1000));
+ previous_arrive_timestamp = arrive_timestamps[i];
+ }
+ }
+
+ private:
+ struct TimestampPair {
+ TimestampPair(int64_t arrive_timestamp, int64_t render_timestamp)
+ : arrive_time(arrive_timestamp), render_time(render_timestamp) {}
+
+ int64_t arrive_time;
+ int64_t render_time;
+ };
+
+ void GenerateAndInsertFrame(int64_t render_timestamp_ms) {
+ VCMPacket packet;
+ stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey,
+ 1, // media packets
+ 0, // empty packets
+ render_timestamp_ms);
+
+ bool packet_available = stream_generator_->PopPacket(&packet, 0);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return; // Return here to avoid crashes below.
+ receiver_->InsertPacket(packet);
+ }
+
+ std::queue<TimestampPair> timestamps_;
+ StreamGenerator* stream_generator_;
+ VCMReceiver* receiver_;
+};
+
+// An EventWrapper driven by a SimulatedClockWithFrames. A call to Wait() does
+// one of two things:
+// 1. If `stop_on_frame` is true, the clock is advanced to the exact instant
+// the first frame arrives and that frame is inserted into the jitter buffer,
+// or to now + `max_time` if no frame arrives within the window.
+// 2. If `stop_on_frame` is false, the clock is advanced to now + `max_time`,
+// and all frames arriving between now and now + `max_time` are inserted into
+// the jitter buffer.
+//
+// This simulates the jitter buffer receiving packets from the network as time
+// elapses.
+
+class FrameInjectEvent : public EventWrapper {
+ public:
+ FrameInjectEvent(SimulatedClockWithFrames* clock, bool stop_on_frame)
+ : clock_(clock), stop_on_frame_(stop_on_frame) {}
+
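+  // Set() is a no-op here; the event is effectively signaled by injecting a
+  // frame while the clock advances inside Wait().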
+ bool Set() override { return true; }
+
+ EventTypeWrapper Wait(int max_time_ms) override {
+ if (clock_->AdvanceTimeMilliseconds(max_time_ms, stop_on_frame_) &&
+ stop_on_frame_) {
+ return EventTypeWrapper::kEventSignaled;
+ } else {
+ return EventTypeWrapper::kEventTimeout;
+ }
+ }
+
+ private:
+ SimulatedClockWithFrames* clock_;
+ bool stop_on_frame_;
+};
+
+class VCMReceiverTimingTest : public ::testing::Test {
+ protected:
+ VCMReceiverTimingTest()
+ : clock_(&stream_generator_, &receiver_),
+ stream_generator_(0, clock_.TimeInMilliseconds()),
+ timing_(&clock_, field_trials_),
+ receiver_(
+ &timing_,
+ &clock_,
+ std::unique_ptr<EventWrapper>(new FrameInjectEvent(&clock_, false)),
+ std::unique_ptr<EventWrapper>(new FrameInjectEvent(&clock_, true)),
+ field_trials_) {}
+
+ virtual void SetUp() {}
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClockWithFrames clock_;
+ StreamGenerator stream_generator_;
+ VCMTiming timing_;
+ VCMReceiver receiver_;
+};
+
+// Tests that VCMReceiver::FrameForDecoding handles the parameter
+// `max_wait_time_ms` correctly:
+// 1. The call should never take more than `max_wait_time_ms`.
+// 2. If the call returns before now + `max_wait_time_ms`, a frame must be
+// returned.
+TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+
+ // Construct test samples.
+ // render_timestamps are the timestamps stored in the Frame;
+ // arrive_timestamps controls when the Frame packet got received.
+ for (size_t i = 0; i < kNumFrames; i++) {
+    // Preset the frame rate to 25 Hz, but add a reasonable deviation to
+    // arrive_timestamps to mimic network jitter.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+
+ const int64_t kMaxWaitTime = 30;
+
+  // Ideally, we should get all frames that we fed in via SetFrames.
+  // If FrameForDecoding erroneously drops frames, this loop never terminates
+  // and we rely on the test runner to kill the test.
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(kMaxWaitTime, false);
+ int64_t end_time = clock_.TimeInMilliseconds();
+
+    // In any case, FrameForDecoding should not wait longer than
+    // max_wait_time. If we did not get a frame, it must have been waiting
+    // for exactly max_wait_time. (The samples constructed above guarantee
+    // there is no timing error, so the only way it returns nullptr is by
+    // running out of time.)
+ if (frame) {
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ EXPECT_GE(kMaxWaitTime, end_time - start_time);
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
+
+// Tests that VCMReceiver::FrameForDecoding handles the parameters
+// `prefer_late_decoding` and `max_wait_time_ms` correctly:
+// 1. The call should never take more than `max_wait_time_ms`.
+// 2. If the call returns before now + `max_wait_time_ms`, a frame must be
+// returned, and the end time must equal the frame's render timestamp minus
+// the decode and render delays.
+TEST_F(VCMReceiverTimingTest, FrameForDecodingPreferLateDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+
+ auto timings = timing_.GetTimings();
+ TimeDelta render_delay = timings.render_delay;
+ TimeDelta max_decode = timings.max_decode_duration;
+
+ // Construct test samples.
+ // render_timestamps are the timestamps stored in the Frame;
+ // arrive_timestamps controls when the Frame packet got received.
+ for (size_t i = 0; i < kNumFrames; i++) {
+    // Preset the frame rate to 25 Hz, but add a reasonable deviation to
+    // arrive_timestamps to mimic network jitter.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+ const int64_t kMaxWaitTime = 30;
+ bool prefer_late_decoding = true;
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+
+ VCMEncodedFrame* frame =
+ receiver_.FrameForDecoding(kMaxWaitTime, prefer_late_decoding);
+ int64_t end_time = clock_.TimeInMilliseconds();
+ if (frame) {
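+      // With late decoding, the receiver returns the frame at the last moment
+      // it can still be decoded and rendered on time: its render time minus
+      // the decode and render delays.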
+ EXPECT_EQ(frame->RenderTimeMs() - max_decode.ms() - render_delay.ms(),
+ end_time);
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc
new file mode 100644
index 0000000000..9f3d5bb296
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_frame_id_only_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameIdOnlyRefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ int frame_id) {
+ frame->SetSpatialIndex(0);
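+  // Mask the id down to the 15-bit picture id range and unwrap it into a
+  // monotonically increasing 64-bit frame id.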
+ frame->SetId(unwrapper_.Unwrap(frame_id & (kFrameIdLength - 1)));
+ frame->num_references =
+ frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1;
+ frame->references[0] = frame->Id() - 1;
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ res.push_back(std::move(frame));
+ return res;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h
new file mode 100644
index 0000000000..5c0bdc8b26
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class RtpFrameIdOnlyRefFinder {
+ public:
+ RtpFrameIdOnlyRefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ int frame_id);
+
+ private:
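+  // Picture ids are at most 15 bits on the wire, so unwrap modulo 2^15.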
+ static constexpr int kFrameIdLength = 1 << 15;
+ SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc
new file mode 100644
index 0000000000..a44b76bf15
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+
+#include <utility>
+
+#include "absl/types/variant.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_id_only_ref_finder.h"
+#include "modules/video_coding/rtp_generic_ref_finder.h"
+#include "modules/video_coding/rtp_seq_num_only_ref_finder.h"
+#include "modules/video_coding/rtp_vp8_ref_finder.h"
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+
+namespace webrtc {
+namespace internal {
+class RtpFrameReferenceFinderImpl {
+ public:
+ RtpFrameReferenceFinderImpl() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ using RefFinder = absl::variant<absl::monostate,
+ RtpGenericFrameRefFinder,
+ RtpFrameIdOnlyRefFinder,
+ RtpSeqNumOnlyRefFinder,
+ RtpVp8RefFinder,
+ RtpVp9RefFinder>;
+
+ template <typename T>
+ T& GetRefFinderAs();
+ RefFinder ref_finder_;
+};
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinderImpl::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
+
+ if (video_header.generic.has_value()) {
+ return GetRefFinderAs<RtpGenericFrameRefFinder>().ManageFrame(
+ std::move(frame), *video_header.generic);
+ }
+
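+  // No generic descriptor present: fall back to codec-specific reference
+  // finders, degrading to frame-id-only or sequence-number-only inference
+  // when the codec header lacks the richer fields.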
+ switch (frame->codec_type()) {
+ case kVideoCodecVP8: {
+ const RTPVideoHeaderVP8& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
+
+ if (vp8_header.temporalIdx == kNoTemporalIdx ||
+ vp8_header.tl0PicIdx == kNoTl0PicIdx) {
+ if (vp8_header.pictureId == kNoPictureId) {
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+
+ return GetRefFinderAs<RtpFrameIdOnlyRefFinder>().ManageFrame(
+ std::move(frame), vp8_header.pictureId);
+ }
+
+ return GetRefFinderAs<RtpVp8RefFinder>().ManageFrame(std::move(frame));
+ }
+ case kVideoCodecVP9: {
+ const RTPVideoHeaderVP9& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+
+ if (vp9_header.temporal_idx == kNoTemporalIdx) {
+ if (vp9_header.picture_id == kNoPictureId) {
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+
+ return GetRefFinderAs<RtpFrameIdOnlyRefFinder>().ManageFrame(
+ std::move(frame), vp9_header.picture_id);
+ }
+
+ return GetRefFinderAs<RtpVp9RefFinder>().ManageFrame(std::move(frame));
+ }
+ case kVideoCodecGeneric: {
+ if (auto* generic_header = absl::get_if<RTPVideoHeaderLegacyGeneric>(
+ &video_header.video_type_header)) {
+ return GetRefFinderAs<RtpFrameIdOnlyRefFinder>().ManageFrame(
+ std::move(frame), generic_header->picture_id);
+ }
+
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+ default: {
+ return GetRefFinderAs<RtpSeqNumOnlyRefFinder>().ManageFrame(
+ std::move(frame));
+ }
+ }
+}
+
+RtpFrameReferenceFinder::ReturnVector
+RtpFrameReferenceFinderImpl::PaddingReceived(uint16_t seq_num) {
+ if (auto* ref_finder = absl::get_if<RtpSeqNumOnlyRefFinder>(&ref_finder_)) {
+ return ref_finder->PaddingReceived(seq_num);
+ }
+ return {};
+}
+
+void RtpFrameReferenceFinderImpl::ClearTo(uint16_t seq_num) {
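+  // Only the ref finders that stash frames have anything to clear; for the
+  // stateless ones this visitor is a no-op.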
+ struct ClearToVisitor {
+ void operator()(absl::monostate& ref_finder) {}
+ void operator()(RtpGenericFrameRefFinder& ref_finder) {}
+ void operator()(RtpFrameIdOnlyRefFinder& ref_finder) {}
+ void operator()(RtpSeqNumOnlyRefFinder& ref_finder) {
+ ref_finder.ClearTo(seq_num);
+ }
+ void operator()(RtpVp8RefFinder& ref_finder) {
+ ref_finder.ClearTo(seq_num);
+ }
+ void operator()(RtpVp9RefFinder& ref_finder) {
+ ref_finder.ClearTo(seq_num);
+ }
+ uint16_t seq_num;
+ };
+
+ absl::visit(ClearToVisitor{seq_num}, ref_finder_);
+}
+
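+// Returns the current ref finder if it is already of type T; otherwise
+// replaces the variant with a default-constructed T, discarding any state
+// accumulated by the previous finder (e.g. after a mid-stream codec change).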
+template <typename T>
+T& RtpFrameReferenceFinderImpl::GetRefFinderAs() {
+ if (auto* ref_finder = absl::get_if<T>(&ref_finder_)) {
+ return *ref_finder;
+ }
+ return ref_finder_.emplace<T>();
+}
+
+} // namespace internal
+
+RtpFrameReferenceFinder::RtpFrameReferenceFinder()
+ : RtpFrameReferenceFinder(0) {}
+
+RtpFrameReferenceFinder::RtpFrameReferenceFinder(int64_t picture_id_offset)
+ : picture_id_offset_(picture_id_offset),
+ impl_(std::make_unique<internal::RtpFrameReferenceFinderImpl>()) {}
+
+RtpFrameReferenceFinder::~RtpFrameReferenceFinder() = default;
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ // If we have cleared past this frame, drop it.
+ if (cleared_to_seq_num_ != -1 &&
+ AheadOf<uint16_t>(cleared_to_seq_num_, frame->first_seq_num())) {
+ return {};
+ }
+
+ auto frames = impl_->ManageFrame(std::move(frame));
+ AddPictureIdOffset(frames);
+ return frames;
+}
+
+RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::PaddingReceived(
+ uint16_t seq_num) {
+ auto frames = impl_->PaddingReceived(seq_num);
+ AddPictureIdOffset(frames);
+ return frames;
+}
+
+void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) {
+ cleared_to_seq_num_ = seq_num;
+ impl_->ClearTo(seq_num);
+}
+
+void RtpFrameReferenceFinder::AddPictureIdOffset(ReturnVector& frames) {
+ for (auto& frame : frames) {
+ frame->SetId(frame->Id() + picture_id_offset_);
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] += picture_id_offset_;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h
new file mode 100644
index 0000000000..9ce63cd8a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
+
+#include <memory>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+
+namespace webrtc {
+namespace internal {
+class RtpFrameReferenceFinderImpl;
+} // namespace internal
+
+class RtpFrameReferenceFinder {
+ public:
+ using ReturnVector = absl::InlinedVector<std::unique_ptr<RtpFrameObject>, 3>;
+
+ RtpFrameReferenceFinder();
+ explicit RtpFrameReferenceFinder(int64_t picture_id_offset);
+ ~RtpFrameReferenceFinder();
+
+  // The RtpFrameReferenceFinder will hold onto the frame until:
+  //  - the required information to determine its references has been
+  //    received, in which case it (and possibly other frames) is returned, or
+  //  - there are too many stashed frames (determined by `kMaxStashedFrames`),
+  //    in which case it gets dropped, or
+  //  - it gets cleared by ClearTo, in which case it's dropped, or
+  //  - the frame is old, in which case it also gets dropped.
+ ReturnVector ManageFrame(std::unique_ptr<RtpFrameObject> frame);
+
+ // Notifies that padding has been received, which the reference finder
+ // might need to calculate the references of a frame.
+ ReturnVector PaddingReceived(uint16_t seq_num);
+
+ // Clear all stashed frames that include packets older than `seq_num`.
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ void AddPictureIdOffset(ReturnVector& frames);
+
+ // How far frames have been cleared out of the buffer by RTP sequence number.
+ // A frame will be cleared if it contains a packet with a sequence number
+ // older than `cleared_to_seq_num_`.
+ int cleared_to_seq_num_ = -1;
+ const int64_t picture_id_offset_;
+ std::unique_ptr<internal::RtpFrameReferenceFinderImpl> impl_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc
new file mode 100644
index 0000000000..c58f1a987d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder_unittest.cc
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstring>
+#include <limits>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/random.h"
+#include "rtc_base/ref_count.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+std::unique_ptr<RtpFrameObject> CreateFrame(
+ uint16_t seq_num_start,
+ uint16_t seq_num_end,
+ bool keyframe,
+ VideoCodecType codec,
+ const RTPVideoTypeHeader& video_type_header) {
+ RTPVideoHeader video_header;
+ video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ video_header.video_type_header = video_type_header;
+
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ seq_num_start,
+ seq_num_end,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ codec,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+}
+} // namespace
+
+class TestRtpFrameReferenceFinder : public ::testing::Test {
+ protected:
+ TestRtpFrameReferenceFinder()
+ : rand_(0x8739211),
+ reference_finder_(std::make_unique<RtpFrameReferenceFinder>()),
+ frames_from_callback_(FrameComp()) {}
+
+ uint16_t Rand() { return rand_.Rand<uint16_t>(); }
+
+ void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames) {
+ for (auto& frame : frames) {
+ int64_t pid = frame->Id();
+ uint16_t sidx = *frame->SpatialIndex();
+ auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx));
+ if (frame_it != frames_from_callback_.end()) {
+ ADD_FAILURE() << "Already received frame with (pid:sidx): (" << pid
+ << ":" << sidx << ")";
+ return;
+ }
+
+ frames_from_callback_.insert(
+ std::make_pair(std::make_pair(pid, sidx), std::move(frame)));
+ }
+ }
+
+ void InsertGeneric(uint16_t seq_num_start,
+ uint16_t seq_num_end,
+ bool keyframe) {
+ std::unique_ptr<RtpFrameObject> frame =
+ CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecGeneric,
+ RTPVideoTypeHeader());
+
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+ }
+
+ void InsertH264(uint16_t seq_num_start, uint16_t seq_num_end, bool keyframe) {
+ std::unique_ptr<RtpFrameObject> frame =
+ CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264,
+ RTPVideoTypeHeader());
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+ }
+
+ void InsertPadding(uint16_t seq_num) {
+ OnCompleteFrames(reference_finder_->PaddingReceived(seq_num));
+ }
+
+  // Check if a frame with picture id `pid` and spatial index `sidx` has been
+  // delivered by the reference finder, and if so, whether it has the
+  // references specified by `refs`.
+ template <typename... T>
+  void CheckReferences(int64_t picture_id,
+                       uint16_t sidx,
+                       T... refs) const {
+    int64_t pid = picture_id;
+ auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx));
+ if (frame_it == frames_from_callback_.end()) {
+ ADD_FAILURE() << "Could not find frame with (pid:sidx): (" << pid << ":"
+ << sidx << ")";
+ return;
+ }
+
+ std::set<int64_t> actual_refs;
+ for (uint8_t r = 0; r < frame_it->second->num_references; ++r)
+ actual_refs.insert(frame_it->second->references[r]);
+
+ std::set<int64_t> expected_refs;
+ RefsToSet(&expected_refs, refs...);
+
+ ASSERT_EQ(expected_refs, actual_refs);
+ }
+
+ template <typename... T>
+ void CheckReferencesGeneric(int64_t pid, T... refs) const {
+ CheckReferences(pid, 0, refs...);
+ }
+
+ template <typename... T>
+ void CheckReferencesH264(int64_t pid, T... refs) const {
+ CheckReferences(pid, 0, refs...);
+ }
+
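+  // Recursively expands the variadic reference list into a set; the
+  // zero-argument overload below terminates the recursion.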
+ template <typename... T>
+ void RefsToSet(std::set<int64_t>* m, int64_t ref, T... refs) const {
+ m->insert(ref);
+ RefsToSet(m, refs...);
+ }
+
+ void RefsToSet(std::set<int64_t>* m) const {}
+
+ Random rand_;
+ std::unique_ptr<RtpFrameReferenceFinder> reference_finder_;
+ struct FrameComp {
+ bool operator()(const std::pair<int64_t, uint8_t> f1,
+ const std::pair<int64_t, uint8_t> f2) const {
+ if (f1.first == f2.first)
+ return f1.second < f2.second;
+ return f1.first < f2.first;
+ }
+ };
+  std::map<std::pair<int64_t, uint8_t>, std::unique_ptr<EncodedFrame>,
+           FrameComp>
+      frames_from_callback_;
+};
+
+TEST_F(TestRtpFrameReferenceFinder, PaddingPackets) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertGeneric(sn + 2, sn + 2, false);
+ EXPECT_EQ(1UL, frames_from_callback_.size());
+ InsertPadding(sn + 1);
+ EXPECT_EQ(2UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReordered) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertPadding(sn + 1);
+ InsertPadding(sn + 4);
+ InsertGeneric(sn + 2, sn + 3, false);
+
+ EXPECT_EQ(2UL, frames_from_callback_.size());
+ CheckReferencesGeneric(sn);
+ CheckReferencesGeneric(sn + 3, sn + 0);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReorderedMultipleKeyframes) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertPadding(sn + 1);
+ InsertPadding(sn + 4);
+ InsertGeneric(sn + 2, sn + 3, false);
+ InsertGeneric(sn + 5, sn + 5, true);
+ InsertPadding(sn + 6);
+ InsertPadding(sn + 9);
+ InsertGeneric(sn + 7, sn + 8, false);
+
+ EXPECT_EQ(4UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, AdvanceSavedKeyframe) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn, true);
+ InsertGeneric(sn + 1, sn + 1, true);
+ InsertGeneric(sn + 2, sn + 10000, false);
+ InsertGeneric(sn + 10001, sn + 20000, false);
+ InsertGeneric(sn + 20001, sn + 30000, false);
+ InsertGeneric(sn + 30001, sn + 40000, false);
+
+ EXPECT_EQ(6UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, ClearTo) {
+ uint16_t sn = Rand();
+
+ InsertGeneric(sn, sn + 1, true);
+ InsertGeneric(sn + 4, sn + 5, false); // stashed
+ EXPECT_EQ(1UL, frames_from_callback_.size());
+
+ InsertGeneric(sn + 6, sn + 7, true); // keyframe
+ EXPECT_EQ(2UL, frames_from_callback_.size());
+ reference_finder_->ClearTo(sn + 7);
+
+ InsertGeneric(sn + 8, sn + 9, false); // first frame after keyframe.
+ EXPECT_EQ(3UL, frames_from_callback_.size());
+
+ InsertGeneric(sn + 2, sn + 3, false); // late, cleared past this frame.
+ EXPECT_EQ(3UL, frames_from_callback_.size());
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264KeyFrameReferences) {
+ uint16_t sn = Rand();
+ InsertH264(sn, sn, true);
+
+ ASSERT_EQ(1UL, frames_from_callback_.size());
+ CheckReferencesH264(sn);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrap) {
+ uint16_t sn = 0xFFFF;
+
+ InsertH264(sn - 1, sn - 1, true);
+ InsertH264(sn, sn, false);
+ InsertH264(sn + 1, sn + 1, false);
+ InsertH264(sn + 2, sn + 2, false);
+
+ ASSERT_EQ(4UL, frames_from_callback_.size());
+ CheckReferencesH264(sn - 1);
+ CheckReferencesH264(sn, sn - 1);
+ CheckReferencesH264(sn + 1, sn);
+ CheckReferencesH264(sn + 2, sn + 1);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264Frames) {
+ uint16_t sn = Rand();
+
+ InsertH264(sn, sn, true);
+ InsertH264(sn + 1, sn + 1, false);
+ InsertH264(sn + 2, sn + 2, false);
+ InsertH264(sn + 3, sn + 3, false);
+
+ ASSERT_EQ(4UL, frames_from_callback_.size());
+ CheckReferencesH264(sn);
+ CheckReferencesH264(sn + 1, sn);
+ CheckReferencesH264(sn + 2, sn + 1);
+ CheckReferencesH264(sn + 3, sn + 2);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264Reordering) {
+ uint16_t sn = Rand();
+
+ InsertH264(sn, sn, true);
+ InsertH264(sn + 1, sn + 1, false);
+ InsertH264(sn + 3, sn + 3, false);
+ InsertH264(sn + 2, sn + 2, false);
+ InsertH264(sn + 5, sn + 5, false);
+ InsertH264(sn + 6, sn + 6, false);
+ InsertH264(sn + 4, sn + 4, false);
+
+ ASSERT_EQ(7UL, frames_from_callback_.size());
+ CheckReferencesH264(sn);
+ CheckReferencesH264(sn + 1, sn);
+ CheckReferencesH264(sn + 2, sn + 1);
+ CheckReferencesH264(sn + 3, sn + 2);
+ CheckReferencesH264(sn + 4, sn + 3);
+ CheckReferencesH264(sn + 5, sn + 4);
+ CheckReferencesH264(sn + 6, sn + 5);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrapMulti) {
+ uint16_t sn = 0xFFFF;
+
+ InsertH264(sn - 3, sn - 2, true);
+ InsertH264(sn - 1, sn + 1, false);
+ InsertH264(sn + 2, sn + 3, false);
+ InsertH264(sn + 4, sn + 7, false);
+
+ ASSERT_EQ(4UL, frames_from_callback_.size());
+ CheckReferencesH264(sn - 2);
+ CheckReferencesH264(sn + 1, sn - 2);
+ CheckReferencesH264(sn + 3, sn + 1);
+ CheckReferencesH264(sn + 7, sn + 3);
+}
+
+TEST_F(TestRtpFrameReferenceFinder, Av1FrameNoDependencyDescriptor) {
+ uint16_t sn = 0xFFFF;
+ std::unique_ptr<RtpFrameObject> frame =
+ CreateFrame(/*seq_num_start=*/sn, /*seq_num_end=*/sn, /*keyframe=*/true,
+ kVideoCodecAV1, RTPVideoTypeHeader());
+
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+
+ ASSERT_EQ(1UL, frames_from_callback_.size());
+ CheckReferencesGeneric(sn);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc
new file mode 100644
index 0000000000..fd5b8afda1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_generic_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ const RTPVideoHeader::GenericDescriptorInfo& descriptor) {
+ // Frame IDs are unwrapped in the RtpVideoStreamReceiver, no need to unwrap
+ // them here.
+ frame->SetId(descriptor.frame_id);
+ frame->SetSpatialIndex(descriptor.spatial_index);
+ if (descriptor.temporal_index != kNoTemporalIdx)
+ frame->SetTemporalIndex(descriptor.temporal_index);
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ if (EncodedFrame::kMaxFrameReferences < descriptor.dependencies.size()) {
+ RTC_LOG(LS_WARNING) << "Too many dependencies in generic descriptor.";
+ return res;
+ }
+
+ frame->num_references = descriptor.dependencies.size();
+ for (size_t i = 0; i < descriptor.dependencies.size(); ++i) {
+ frame->references[i] = descriptor.dependencies[i];
+ }
+
+ res.push_back(std::move(frame));
+ return res;
+}
+
+} // namespace webrtc
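
The function above trusts the dependency descriptor wholesale: it copies at
most kMaxFrameReferences dependencies and hands the frame straight off. Below
is a minimal standalone sketch of that copy-and-bound step, using simplified
stand-in types rather than the real RtpFrameObject/EncodedFrame classes
(kMaxFrameReferences = 5 mirrors EncodedFrame::kMaxFrameReferences upstream):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for EncodedFrame::kMaxFrameReferences (5 in the real code).
constexpr size_t kMaxFrameReferences = 5;

struct Frame {
  int64_t id = 0;
  size_t num_references = 0;
  int64_t references[kMaxFrameReferences] = {};
};

// Returns false (frame dropped) when the descriptor carries more dependencies
// than a frame can hold; otherwise copies them verbatim, as ManageFrame does.
bool AssignReferences(Frame& frame, int64_t frame_id,
                      const std::vector<int64_t>& dependencies) {
  if (dependencies.size() > kMaxFrameReferences) return false;
  frame.id = frame_id;
  frame.num_references = dependencies.size();
  for (size_t i = 0; i < dependencies.size(); ++i) {
    frame.references[i] = dependencies[i];
  }
  return true;
}

int main() {
  Frame f;
  bool ok = AssignReferences(f, /*frame_id=*/10, /*dependencies=*/{8, 9});
  std::cout << ok << " num_references=" << f.num_references << "\n";
}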
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h
new file mode 100644
index 0000000000..87d7b59406
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_
+
+#include <memory>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+
+namespace webrtc {
+
+class RtpGenericFrameRefFinder {
+ public:
+ RtpGenericFrameRefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame,
+ const RTPVideoHeader::GenericDescriptorInfo& descriptor);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc
new file mode 100644
index 0000000000..59b027e2ce
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_seq_num_only_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ FrameDecision decision = ManageFrameInternal(frame.get());
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ switch (decision) {
+ case kStash:
+ if (stashed_frames_.size() > kMaxStashedFrames)
+ stashed_frames_.pop_back();
+ stashed_frames_.push_front(std::move(frame));
+ return res;
+ case kHandOff:
+ res.push_back(std::move(frame));
+ RetryStashedFrames(res);
+ return res;
+ case kDrop:
+ return res;
+ }
+
+ return res;
+}
+
+RtpSeqNumOnlyRefFinder::FrameDecision
+RtpSeqNumOnlyRefFinder::ManageFrameInternal(RtpFrameObject* frame) {
+ if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ last_seq_num_gop_.insert(std::make_pair(
+ frame->last_seq_num(),
+ std::make_pair(frame->last_seq_num(), frame->last_seq_num())));
+ }
+
+  // We have received a frame but not yet a keyframe; stash this frame.
+ if (last_seq_num_gop_.empty())
+ return kStash;
+
+ // Clean up info for old keyframes but make sure to keep info
+ // for the last keyframe.
+ auto clean_to = last_seq_num_gop_.lower_bound(frame->last_seq_num() - 100);
+ for (auto it = last_seq_num_gop_.begin();
+ it != clean_to && last_seq_num_gop_.size() > 1;) {
+ it = last_seq_num_gop_.erase(it);
+ }
+
+ // Find the last sequence number of the last frame for the keyframe
+ // that this frame indirectly references.
+ auto seq_num_it = last_seq_num_gop_.upper_bound(frame->last_seq_num());
+ if (seq_num_it == last_seq_num_gop_.begin()) {
+ RTC_LOG(LS_WARNING) << "Generic frame with packet range ["
+ << frame->first_seq_num() << ", "
+ << frame->last_seq_num()
+ << "] has no GoP, dropping frame.";
+ return kDrop;
+ }
+ seq_num_it--;
+
+  // Make sure the packet sequence numbers are continuous; otherwise stash
+  // this frame.
+ uint16_t last_picture_id_gop = seq_num_it->second.first;
+ uint16_t last_picture_id_with_padding_gop = seq_num_it->second.second;
+ if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) {
+ uint16_t prev_seq_num = frame->first_seq_num() - 1;
+
+ if (prev_seq_num != last_picture_id_with_padding_gop)
+ return kStash;
+ }
+
+ RTC_DCHECK(AheadOrAt(frame->last_seq_num(), seq_num_it->first));
+
+ // Since keyframes can cause reordering we can't simply assign the
+ // picture id according to some incrementing counter.
+ frame->SetId(frame->last_seq_num());
+ frame->num_references =
+ frame->frame_type() == VideoFrameType::kVideoFrameDelta;
+ frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop);
+ if (AheadOf<uint16_t>(frame->Id(), last_picture_id_gop)) {
+ seq_num_it->second.first = frame->Id();
+ seq_num_it->second.second = frame->Id();
+ }
+
+ UpdateLastPictureIdWithPadding(frame->Id());
+ frame->SetSpatialIndex(0);
+ frame->SetId(rtp_seq_num_unwrapper_.Unwrap(frame->Id()));
+ return kHandOff;
+}
+
+void RtpSeqNumOnlyRefFinder::RetryStashedFrames(
+ RtpFrameReferenceFinder::ReturnVector& res) {
+ bool complete_frame = false;
+ do {
+ complete_frame = false;
+ for (auto frame_it = stashed_frames_.begin();
+ frame_it != stashed_frames_.end();) {
+ FrameDecision decision = ManageFrameInternal(frame_it->get());
+
+ switch (decision) {
+ case kStash:
+ ++frame_it;
+ break;
+ case kHandOff:
+ complete_frame = true;
+ res.push_back(std::move(*frame_it));
+ [[fallthrough]];
+ case kDrop:
+ frame_it = stashed_frames_.erase(frame_it);
+ }
+ }
+ } while (complete_frame);
+}
+
+void RtpSeqNumOnlyRefFinder::UpdateLastPictureIdWithPadding(uint16_t seq_num) {
+ auto gop_seq_num_it = last_seq_num_gop_.upper_bound(seq_num);
+
+ // If this padding packet "belongs" to a group of pictures that we don't track
+ // anymore, do nothing.
+ if (gop_seq_num_it == last_seq_num_gop_.begin())
+ return;
+ --gop_seq_num_it;
+
+  // Calculate the next continuous sequence number and search for it in
+ // the padding packets we have stashed.
+ uint16_t next_seq_num_with_padding = gop_seq_num_it->second.second + 1;
+ auto padding_seq_num_it =
+ stashed_padding_.lower_bound(next_seq_num_with_padding);
+
+  // While there are still padding packets and those padding packets are
+  // continuous, advance the "last-picture-id-with-padding" and remove the
+  // stashed padding packet.
+ while (padding_seq_num_it != stashed_padding_.end() &&
+ *padding_seq_num_it == next_seq_num_with_padding) {
+ gop_seq_num_it->second.second = next_seq_num_with_padding;
+ ++next_seq_num_with_padding;
+ padding_seq_num_it = stashed_padding_.erase(padding_seq_num_it);
+ }
+
+  // If the stream has been continuous without any new keyframes for a while,
+  // there is a risk that new frames will appear to be older than the keyframe
+  // they belong to due to the sequence number wrapping. To prevent this we
+  // advance the picture id of the keyframe every so often.
+ if (ForwardDiff(gop_seq_num_it->first, seq_num) > 10000) {
+ auto save = gop_seq_num_it->second;
+ last_seq_num_gop_.clear();
+ last_seq_num_gop_[seq_num] = save;
+ }
+}
+
+RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::PaddingReceived(
+ uint16_t seq_num) {
+ auto clean_padding_to =
+ stashed_padding_.lower_bound(seq_num - kMaxPaddingAge);
+ stashed_padding_.erase(stashed_padding_.begin(), clean_padding_to);
+ stashed_padding_.insert(seq_num);
+ UpdateLastPictureIdWithPadding(seq_num);
+ RtpFrameReferenceFinder::ReturnVector res;
+ RetryStashedFrames(res);
+ return res;
+}
+
+void RtpSeqNumOnlyRefFinder::ClearTo(uint16_t seq_num) {
+ auto it = stashed_frames_.begin();
+ while (it != stashed_frames_.end()) {
+ if (AheadOf<uint16_t>(seq_num, (*it)->first_seq_num())) {
+ it = stashed_frames_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace webrtc
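
The stash/hand-off decisions above hinge on the AheadOf/ForwardDiff
wrap-around comparisons. A simplified re-implementation of that arithmetic
for 16-bit sequence numbers (a sketch, not the actual helpers in
rtc_base/numerics) shows why a sequence number that has numerically wrapped
still counts as ahead:

#include <cstdint>
#include <iostream>

// Forward distance from `from` to `to` in the wrapping 16-bit sequence
// space; unsigned subtraction performs the mod-2^16 arithmetic for us.
uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return static_cast<uint16_t>(to - from);
}

// `a` is ahead of `b` when the forward distance b->a is less than half the
// sequence space (ties broken toward not-ahead in this simplified version).
bool AheadOf(uint16_t a, uint16_t b) {
  return a != b && ForwardDiff(b, a) < 0x8000;
}

int main() {
  uint16_t sn = 0xFFFF;  // about to wrap, like the *SequenceNumberWrap tests
  std::cout << AheadOf(static_cast<uint16_t>(sn + 2), sn) << "\n";      // 1
  std::cout << ForwardDiff(sn, static_cast<uint16_t>(sn + 2)) << "\n";  // 2
}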
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h
new file mode 100644
index 0000000000..c05655b579
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class RtpSeqNumOnlyRefFinder {
+ public:
+ RtpSeqNumOnlyRefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ static constexpr int kMaxStashedFrames = 100;
+ static constexpr int kMaxPaddingAge = 100;
+
+ enum FrameDecision { kStash, kHandOff, kDrop };
+
+ FrameDecision ManageFrameInternal(RtpFrameObject* frame);
+ void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+ void UpdateLastPictureIdWithPadding(uint16_t seq_num);
+
+  // For every group of pictures, hold two sequence numbers: the sequence
+  // number of the last packet of the last completed frame, and that same
+  // sequence number advanced past any continuous run of padding packets.
+ std::map<uint16_t,
+ std::pair<uint16_t, uint16_t>,
+ DescendingSeqNumComp<uint16_t>>
+ last_seq_num_gop_;
+
+ // Padding packets that have been received but that are not yet continuous
+ // with any group of pictures.
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t>> stashed_padding_;
+
+ // Frames that have been fully received but didn't have all the information
+ // needed to determine their references.
+ std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+
+ // Unwrapper used to unwrap generic RTP streams. In a generic stream we derive
+ // a picture id from the packet sequence number.
+ SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_
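
The two sequence numbers stored per GoP are easiest to see in a toy run: a
keyframe ends at packet 100, a padding packet at 101 advances the padded
marker, and a delta frame whose first packet is 102 then counts as
continuous. This is a hand-rolled model with plain standard-library types,
not the real class:

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <utility>

int main() {
  // Toy model of last_seq_num_gop_: key = last seq num of the keyframe,
  // value = {last seq num of the last completed frame, the same value
  //          advanced over any continuous run of padding packets}.
  std::map<uint16_t, std::pair<uint16_t, uint16_t>> gop;
  gop[100] = {100, 100};  // a keyframe occupying packet 100 was completed

  std::set<uint16_t> stashed_padding = {101};

  // Padding packet 101 is continuous with the GoP, so the padded marker
  // moves, mirroring UpdateLastPictureIdWithPadding.
  auto& info = gop[100];
  while (stashed_padding.count(static_cast<uint16_t>(info.second + 1))) {
    ++info.second;
    stashed_padding.erase(info.second);
  }

  // A delta frame is continuous exactly when first_seq_num - 1 equals the
  // padded marker, mirroring the check in ManageFrameInternal.
  uint16_t first_seq_num = 102;
  std::cout << (static_cast<uint16_t>(first_seq_num - 1) == info.second
                    ? "hand off"
                    : "stash")
            << "\n";  // prints "hand off"
}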
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc
new file mode 100644
index 0000000000..185756ce51
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp8_ref_finder.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpVp8RefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ const RTPVideoHeaderVP8& codec_header = absl::get<RTPVideoHeaderVP8>(
+ frame->GetRtpVideoHeader().video_type_header);
+
+ if (codec_header.temporalIdx != kNoTemporalIdx)
+ frame->SetTemporalIndex(codec_header.temporalIdx);
+
+ int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(codec_header.tl0PicIdx & 0xFF);
+ FrameDecision decision =
+ ManageFrameInternal(frame.get(), codec_header, unwrapped_tl0);
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ switch (decision) {
+ case kStash:
+ if (stashed_frames_.size() > kMaxStashedFrames) {
+ stashed_frames_.pop_back();
+ }
+ stashed_frames_.push_front(
+ {.unwrapped_tl0 = unwrapped_tl0, .frame = std::move(frame)});
+ return res;
+ case kHandOff:
+ res.push_back(std::move(frame));
+ RetryStashedFrames(res);
+ return res;
+ case kDrop:
+ return res;
+ }
+
+ return res;
+}
+
+RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
+ RtpFrameObject* frame,
+ const RTPVideoHeaderVP8& codec_header,
+ int64_t unwrapped_tl0) {
+  // Protect against corrupted packets with arbitrarily large temporal idx.
+ if (codec_header.temporalIdx >= kMaxTemporalLayers)
+ return kDrop;
+
+ frame->SetSpatialIndex(0);
+ frame->SetId(codec_header.pictureId & 0x7FFF);
+
+ if (last_picture_id_ == -1)
+ last_picture_id_ = frame->Id();
+
+ // Clean up info about not yet received frames that are too old.
+ uint16_t old_picture_id =
+ Subtract<kFrameIdLength>(frame->Id(), kMaxNotYetReceivedFrames);
+ auto clean_frames_to = not_yet_received_frames_.lower_bound(old_picture_id);
+ not_yet_received_frames_.erase(not_yet_received_frames_.begin(),
+ clean_frames_to);
+ // Avoid re-adding picture ids that were just erased.
+ if (AheadOf<uint16_t, kFrameIdLength>(old_picture_id, last_picture_id_)) {
+ last_picture_id_ = old_picture_id;
+ }
+  // If there has been a gap in fully received frames, save the picture ids of
+  // the frames in that gap in `not_yet_received_frames_`.
+ if (AheadOf<uint16_t, kFrameIdLength>(frame->Id(), last_picture_id_)) {
+ do {
+ last_picture_id_ = Add<kFrameIdLength>(last_picture_id_, 1);
+ not_yet_received_frames_.insert(last_picture_id_);
+ } while (last_picture_id_ != frame->Id());
+ }
+
+ // Clean up info for base layers that are too old.
+ int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo;
+ auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx);
+ layer_info_.erase(layer_info_.begin(), clean_layer_info_to);
+
+ if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ if (codec_header.temporalIdx != 0) {
+ return kDrop;
+ }
+ frame->num_references = 0;
+ layer_info_[unwrapped_tl0].fill(-1);
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+ }
+
+ auto layer_info_it = layer_info_.find(
+ codec_header.temporalIdx == 0 ? unwrapped_tl0 - 1 : unwrapped_tl0);
+
+ // If we don't have the base layer frame yet, stash this frame.
+ if (layer_info_it == layer_info_.end())
+ return kStash;
+
+  // A non-keyframe base layer frame has been received; copy the layer info
+  // from the previous base layer frame and set a reference to that previous
+  // base layer frame.
+ if (codec_header.temporalIdx == 0) {
+ layer_info_it =
+ layer_info_.emplace(unwrapped_tl0, layer_info_it->second).first;
+ frame->num_references = 1;
+ int64_t last_pid_on_layer = layer_info_it->second[0];
+
+ // Is this an old frame that has already been used to update the state? If
+ // so, drop it.
+ if (AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer, frame->Id())) {
+ return kDrop;
+ }
+
+ frame->references[0] = last_pid_on_layer;
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+ }
+
+  // Layer sync frame: this frame only references its base layer frame.
+ if (codec_header.layerSync) {
+ frame->num_references = 1;
+ int64_t last_pid_on_layer = layer_info_it->second[codec_header.temporalIdx];
+
+ // Is this an old frame that has already been used to update the state? If
+ // so, drop it.
+ if (last_pid_on_layer != -1 &&
+ AheadOrAt<uint16_t, kFrameIdLength>(last_pid_on_layer, frame->Id())) {
+ return kDrop;
+ }
+
+ frame->references[0] = layer_info_it->second[0];
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+ }
+
+ // Find all references for this frame.
+ frame->num_references = 0;
+ for (uint8_t layer = 0; layer <= codec_header.temporalIdx; ++layer) {
+ // If we have not yet received a previous frame on this temporal layer,
+ // stash this frame.
+ if (layer_info_it->second[layer] == -1)
+ return kStash;
+
+    // If the last frame on this layer is ahead of this frame it means that
+    // a layer sync frame has been received after this frame for the same
+    // base layer frame; drop this frame.
+ if (AheadOf<uint16_t, kFrameIdLength>(layer_info_it->second[layer],
+ frame->Id())) {
+ return kDrop;
+ }
+
+ // If we have not yet received a frame between this frame and the referenced
+ // frame then we have to wait for that frame to be completed first.
+ auto not_received_frame_it =
+ not_yet_received_frames_.upper_bound(layer_info_it->second[layer]);
+ if (not_received_frame_it != not_yet_received_frames_.end() &&
+ AheadOf<uint16_t, kFrameIdLength>(frame->Id(),
+ *not_received_frame_it)) {
+ return kStash;
+ }
+
+ if (!(AheadOf<uint16_t, kFrameIdLength>(frame->Id(),
+ layer_info_it->second[layer]))) {
+ RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->Id()
+ << " and packet range [" << frame->first_seq_num()
+ << ", " << frame->last_seq_num()
+                          << "] already received, dropping frame.";
+ return kDrop;
+ }
+
+ ++frame->num_references;
+ frame->references[layer] = layer_info_it->second[layer];
+ }
+
+ UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);
+ return kHandOff;
+}
+
+void RtpVp8RefFinder::UpdateLayerInfoVp8(RtpFrameObject* frame,
+ int64_t unwrapped_tl0,
+ uint8_t temporal_idx) {
+ auto layer_info_it = layer_info_.find(unwrapped_tl0);
+
+ // Update this layer info and newer.
+ while (layer_info_it != layer_info_.end()) {
+ if (layer_info_it->second[temporal_idx] != -1 &&
+ AheadOf<uint16_t, kFrameIdLength>(layer_info_it->second[temporal_idx],
+ frame->Id())) {
+      // The frame was not newer, so no subsequent layer info has to be
+      // updated.
+ break;
+ }
+
+ layer_info_it->second[temporal_idx] = frame->Id();
+ ++unwrapped_tl0;
+ layer_info_it = layer_info_.find(unwrapped_tl0);
+ }
+ not_yet_received_frames_.erase(frame->Id());
+
+ UnwrapPictureIds(frame);
+}
+
+void RtpVp8RefFinder::RetryStashedFrames(
+ RtpFrameReferenceFinder::ReturnVector& res) {
+ bool complete_frame = false;
+ do {
+ complete_frame = false;
+ for (auto it = stashed_frames_.begin(); it != stashed_frames_.end();) {
+ const RTPVideoHeaderVP8& codec_header = absl::get<RTPVideoHeaderVP8>(
+ it->frame->GetRtpVideoHeader().video_type_header);
+ FrameDecision decision =
+ ManageFrameInternal(it->frame.get(), codec_header, it->unwrapped_tl0);
+
+ switch (decision) {
+ case kStash:
+ ++it;
+ break;
+ case kHandOff:
+ complete_frame = true;
+ res.push_back(std::move(it->frame));
+ [[fallthrough]];
+ case kDrop:
+ it = stashed_frames_.erase(it);
+ }
+ }
+ } while (complete_frame);
+}
+
+void RtpVp8RefFinder::UnwrapPictureIds(RtpFrameObject* frame) {
+ for (size_t i = 0; i < frame->num_references; ++i)
+ frame->references[i] = unwrapper_.Unwrap(frame->references[i]);
+ frame->SetId(unwrapper_.Unwrap(frame->Id()));
+}
+
+void RtpVp8RefFinder::ClearTo(uint16_t seq_num) {
+ auto it = stashed_frames_.begin();
+ while (it != stashed_frames_.end()) {
+ if (AheadOf<uint16_t>(seq_num, it->frame->first_seq_num())) {
+ it = stashed_frames_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace webrtc
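
All picture-id arithmetic in this file runs modulo kFrameIdLength (1 << 15),
the width of the VP8 picture id, rather than over the full uint16_t range. A
small standalone sketch of the Add/Subtract semantics assumed above:

#include <cstdint>
#include <iostream>

// VP8 picture ids are 15 bits wide, so reference arithmetic wraps modulo
// 1 << 15 (kFrameIdLength) instead of the full 16-bit range.
constexpr int kFrameIdLength = 1 << 15;

uint16_t Add(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((a + b) % kFrameIdLength);
}

uint16_t Subtract(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((a + kFrameIdLength - b % kFrameIdLength) %
                               kFrameIdLength);
}

int main() {
  // A frame with picture id 2 that references 5 frames back wraps into the
  // top of the 15-bit space, and adding 5 again gets back to 2.
  std::cout << Subtract(2, 5) << "\n";  // 32765
  std::cout << Add(32765, 5) << "\n";   // 2
}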
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h
new file mode 100644
index 0000000000..26df658a3b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class RtpVp8RefFinder {
+ public:
+ RtpVp8RefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ static constexpr int kFrameIdLength = 1 << 15;
+ static constexpr int kMaxLayerInfo = 50;
+ static constexpr int kMaxNotYetReceivedFrames = 100;
+ static constexpr int kMaxStashedFrames = 100;
+ static constexpr int kMaxTemporalLayers = 5;
+
+ struct UnwrappedTl0Frame {
+ int64_t unwrapped_tl0;
+ std::unique_ptr<RtpFrameObject> frame;
+ };
+
+ enum FrameDecision { kStash, kHandOff, kDrop };
+
+ FrameDecision ManageFrameInternal(RtpFrameObject* frame,
+ const RTPVideoHeaderVP8& codec_header,
+ int64_t unwrapped_tl0);
+ void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+ void UpdateLayerInfoVp8(RtpFrameObject* frame,
+ int64_t unwrapped_tl0,
+ uint8_t temporal_idx);
+ void UnwrapPictureIds(RtpFrameObject* frame);
+
+ // Save the last picture id in order to detect when there is a gap in frames
+ // that have not yet been fully received.
+ int last_picture_id_ = -1;
+
+ // Frames earlier than the last received frame that have not yet been
+ // fully received.
+ std::set<uint16_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>
+ not_yet_received_frames_;
+
+ // Frames that have been fully received but didn't have all the information
+ // needed to determine their references.
+ std::deque<UnwrappedTl0Frame> stashed_frames_;
+
+  // Holds the information about the last completed frame on each temporal
+  // layer, keyed by unwrapped TL0 picture index.
+ std::map<int64_t, std::array<int64_t, kMaxTemporalLayers>> layer_info_;
+
+ // Unwrapper used to unwrap VP8/VP9 streams which have their picture id
+ // specified.
+ SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+
+ SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_
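
A toy view of how layer_info_ drives reference selection, with plain
standard-library containers and hand-picked picture ids (illustrative values
only, not taken from any real stream):

#include <array>
#include <cstdint>
#include <iostream>
#include <map>

constexpr int kMaxTemporalLayers = 5;

int main() {
  // For each unwrapped TL0 index: the last picture id completed on every
  // temporal layer (-1 means no frame seen on that layer yet).
  std::map<int64_t, std::array<int64_t, kMaxTemporalLayers>> layer_info;
  layer_info[7] = {100, 101, 103, -1, -1};

  // A regular (non-sync) TID=2 frame in TL0 group 7 references the last
  // completed frame on every layer up to and including its own, just like
  // the reference loop in ManageFrameInternal.
  const auto& info = layer_info[7];
  for (int layer = 0; layer <= 2; ++layer) {
    std::cout << "layer " << layer << " -> ref " << info[layer] << "\n";
  }
  // A layer-sync TID=2 frame would instead reference only info[0].
}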
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
new file mode 100644
index 0000000000..7dc6cd5521
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp8_ref_finder.h"
+
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Contains;
+using ::testing::Eq;
+using ::testing::Matcher;
+using ::testing::Matches;
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAreArray;
+
+namespace webrtc {
+namespace {
+
+MATCHER_P2(HasIdAndRefs, id, refs, "") {
+ return Matches(Eq(id))(arg->Id()) &&
+ Matches(UnorderedElementsAreArray(refs))(
+ rtc::ArrayView<int64_t>(arg->references, arg->num_references));
+}
+
+Matcher<const std::vector<std::unique_ptr<EncodedFrame>>&>
+HasFrameWithIdAndRefs(int64_t frame_id, const std::vector<int64_t>& refs) {
+ return Contains(HasIdAndRefs(frame_id, refs));
+}
+
+class Frame {
+ public:
+ Frame& AsKeyFrame(bool is_keyframe = true) {
+ is_keyframe_ = is_keyframe;
+ return *this;
+ }
+
+ Frame& Pid(int pid) {
+ picture_id_ = pid;
+ return *this;
+ }
+
+ Frame& Tid(int tid) {
+ temporal_id_ = tid;
+ return *this;
+ }
+
+ Frame& Tl0(int tl0) {
+ tl0_idx_ = tl0;
+ return *this;
+ }
+
+ Frame& AsSync(bool is_sync = true) {
+ sync = is_sync;
+ return *this;
+ }
+
+ operator std::unique_ptr<RtpFrameObject>() {
+ RTPVideoHeaderVP8 vp8_header{};
+ vp8_header.pictureId = *picture_id_;
+ vp8_header.temporalIdx = *temporal_id_;
+ vp8_header.tl0PicIdx = *tl0_idx_;
+ vp8_header.layerSync = sync;
+
+ RTPVideoHeader video_header;
+ video_header.frame_type = is_keyframe_ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ video_header.video_type_header = vp8_header;
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ /*seq_num_start=*/0,
+ /*seq_num_end=*/0,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ kVideoCodecVP8,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+ }
+
+ private:
+ bool is_keyframe_ = false;
+ absl::optional<int> picture_id_;
+ absl::optional<int> temporal_id_;
+ absl::optional<int> tl0_idx_;
+ bool sync = false;
+};
+
+} // namespace
+
+class RtpVp8RefFinderTest : public ::testing::Test {
+ protected:
+ RtpVp8RefFinderTest() : ref_finder_(std::make_unique<RtpVp8RefFinder>()) {}
+
+ void Insert(std::unique_ptr<RtpFrameObject> frame) {
+ for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
+ frames_.push_back(std::move(f));
+ }
+ }
+
+ std::unique_ptr<RtpVp8RefFinder> ref_finder_;
+ std::vector<std::unique_ptr<EncodedFrame>> frames_;
+};
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrame_0) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrameLayerSync_01) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(1).AsSync());
+ Insert(Frame().Pid(1).Tid(1).Tl0(1).AsSync());
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrame_01) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(0).Tl0(2).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(3));
+ Insert(Frame().Pid(3).Tid(0).Tl0(4));
+ Insert(Frame().Pid(3).Tid(0).Tl0(4));
+
+ EXPECT_THAT(frames_, SizeIs(4));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {2}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_0) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8DuplicateTl1Frames) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(0).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(0).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(1));
+ Insert(Frame().Pid(3).Tid(1).Tl0(1));
+ Insert(Frame().Pid(3).Tid(1).Tl0(1));
+ Insert(Frame().Pid(4).Tid(0).Tl0(2));
+ Insert(Frame().Pid(5).Tid(1).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(6));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3, 4}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_0) {
+ Insert(Frame().Pid(1).Tid(0).Tl0(2));
+ Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(3).Tid(0).Tl0(4));
+ Insert(Frame().Pid(2).Tid(0).Tl0(3));
+ Insert(Frame().Pid(5).Tid(0).Tl0(6));
+ Insert(Frame().Pid(6).Tid(0).Tl0(7));
+ Insert(Frame().Pid(4).Tid(0).Tl0(5));
+
+ EXPECT_THAT(frames_, SizeIs(7));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {3}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {5}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_01) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(255).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(255).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(0));
+ Insert(Frame().Pid(3).Tid(1).Tl0(0));
+
+ EXPECT_THAT(frames_, SizeIs(4));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_01) {
+ Insert(Frame().Pid(1).Tid(1).Tl0(255).AsSync());
+ Insert(Frame().Pid(0).Tid(0).Tl0(255).AsKeyFrame());
+ Insert(Frame().Pid(3).Tid(1).Tl0(0));
+ Insert(Frame().Pid(5).Tid(1).Tl0(1));
+ Insert(Frame().Pid(2).Tid(0).Tl0(0));
+ Insert(Frame().Pid(4).Tid(0).Tl0(1));
+ Insert(Frame().Pid(6).Tid(0).Tl0(2));
+ Insert(Frame().Pid(7).Tid(1).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(8));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3, 4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {5, 6}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_0212) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(55).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(2).Tl0(55).AsSync());
+ Insert(Frame().Pid(2).Tid(1).Tl0(55).AsSync());
+ Insert(Frame().Pid(3).Tid(2).Tl0(55));
+ Insert(Frame().Pid(4).Tid(0).Tl0(56));
+ Insert(Frame().Pid(5).Tid(2).Tl0(56));
+ Insert(Frame().Pid(6).Tid(1).Tl0(56));
+ Insert(Frame().Pid(7).Tid(2).Tl0(56));
+ Insert(Frame().Pid(8).Tid(0).Tl0(57));
+ Insert(Frame().Pid(9).Tid(2).Tl0(57).AsSync());
+ Insert(Frame().Pid(10).Tid(1).Tl0(57).AsSync());
+ Insert(Frame().Pid(11).Tid(2).Tl0(57));
+
+ EXPECT_THAT(frames_, SizeIs(12));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {0, 1, 2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {2, 3, 4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {2, 4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {4, 5, 6}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(8, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(9, {8}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {8}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {8, 9, 10}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersMissingFrame_0212) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(55).AsKeyFrame());
+ Insert(Frame().Pid(2).Tid(1).Tl0(55).AsSync());
+ Insert(Frame().Pid(3).Tid(2).Tl0(55));
+
+ EXPECT_THAT(frames_, SizeIs(2));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+}
+
+// Test with 3 temporal layers in a 0212 pattern.
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_0212) {
+ Insert(Frame().Pid(127).Tid(2).Tl0(55).AsSync());
+ Insert(Frame().Pid(126).Tid(0).Tl0(55).AsKeyFrame());
+ Insert(Frame().Pid(128).Tid(1).Tl0(55).AsSync());
+ Insert(Frame().Pid(130).Tid(0).Tl0(56));
+ Insert(Frame().Pid(131).Tid(2).Tl0(56));
+ Insert(Frame().Pid(129).Tid(2).Tl0(55));
+ Insert(Frame().Pid(133).Tid(2).Tl0(56));
+ Insert(Frame().Pid(135).Tid(2).Tl0(57).AsSync());
+ Insert(Frame().Pid(132).Tid(1).Tl0(56));
+ Insert(Frame().Pid(134).Tid(0).Tl0(57));
+ Insert(Frame().Pid(137).Tid(2).Tl0(57));
+ Insert(Frame().Pid(136).Tid(1).Tl0(57).AsSync());
+
+ EXPECT_THAT(frames_, SizeIs(12));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(126, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(127, {126}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(128, {126}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(129, {126, 127, 128}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(130, {126}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(131, {128, 129, 130}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(132, {128, 130}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(133, {130, 131, 132}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(134, {130}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(135, {134}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(136, {134}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(137, {134, 135, 136}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8LayerSync) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(0).AsKeyFrame());
+ Insert(Frame().Pid(1).Tid(1).Tl0(0).AsSync());
+ Insert(Frame().Pid(2).Tid(0).Tl0(1));
+ Insert(Frame().Pid(4).Tid(0).Tl0(2));
+ Insert(Frame().Pid(5).Tid(1).Tl0(2).AsSync());
+ Insert(Frame().Pid(6).Tid(0).Tl0(3));
+ Insert(Frame().Pid(7).Tid(1).Tl0(3));
+
+ EXPECT_THAT(frames_, SizeIs(7));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {4}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {5, 6}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8Tl1SyncFrameAfterTl1Frame) {
+ Insert(Frame().Pid(1).Tid(0).Tl0(247).AsKeyFrame().AsSync());
+ Insert(Frame().Pid(3).Tid(0).Tl0(248));
+ Insert(Frame().Pid(4).Tid(1).Tl0(248));
+ Insert(Frame().Pid(5).Tid(1).Tl0(248).AsSync());
+
+ EXPECT_THAT(frames_, SizeIs(3));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8DetectMissingFrame_0212) {
+ Insert(Frame().Pid(1).Tid(0).Tl0(1).AsKeyFrame());
+ Insert(Frame().Pid(2).Tid(2).Tl0(1).AsSync());
+ Insert(Frame().Pid(3).Tid(1).Tl0(1).AsSync());
+ Insert(Frame().Pid(4).Tid(2).Tl0(1));
+ Insert(Frame().Pid(6).Tid(2).Tl0(2));
+ Insert(Frame().Pid(7).Tid(1).Tl0(2));
+ Insert(Frame().Pid(8).Tid(2).Tl0(2));
+ Insert(Frame().Pid(5).Tid(0).Tl0(2));
+
+ EXPECT_THAT(frames_, SizeIs(8));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {1, 2, 3}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {3, 4, 5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {3, 5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(8, {5, 6, 7}));
+}
+
+TEST_F(RtpVp8RefFinderTest, StashedFramesDoNotWrapTl0Backwards) {
+ Insert(Frame().Pid(0).Tid(0).Tl0(0));
+ EXPECT_THAT(frames_, SizeIs(0));
+
+ Insert(Frame().Pid(128).Tid(0).Tl0(128).AsKeyFrame());
+ EXPECT_THAT(frames_, SizeIs(1));
+ Insert(Frame().Pid(129).Tid(0).Tl0(129));
+ EXPECT_THAT(frames_, SizeIs(2));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc
new file mode 100644
index 0000000000..175ed3464b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+RtpFrameReferenceFinder::ReturnVector RtpVp9RefFinder::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
+ frame->GetRtpVideoHeader().video_type_header);
+
+ if (codec_header.temporal_idx != kNoTemporalIdx)
+ frame->SetTemporalIndex(codec_header.temporal_idx);
+ frame->SetSpatialIndex(codec_header.spatial_idx);
+ frame->SetId(codec_header.picture_id & (kFrameIdLength - 1));
+
+ FrameDecision decision;
+ if (codec_header.temporal_idx >= kMaxTemporalLayers ||
+ codec_header.spatial_idx >= kMaxSpatialLayers) {
+ decision = kDrop;
+ } else if (codec_header.flexible_mode) {
+ decision = ManageFrameFlexible(frame.get(), codec_header);
+ } else {
+ if (codec_header.tl0_pic_idx == kNoTl0PicIdx) {
+ RTC_LOG(LS_WARNING) << "TL0PICIDX is expected to be present in "
+ "non-flexible mode.";
+ decision = kDrop;
+ } else {
+ int64_t unwrapped_tl0 =
+ tl0_unwrapper_.Unwrap(codec_header.tl0_pic_idx & 0xFF);
+ decision = ManageFrameGof(frame.get(), codec_header, unwrapped_tl0);
+
+ if (decision == kStash) {
+ if (stashed_frames_.size() > kMaxStashedFrames) {
+ stashed_frames_.pop_back();
+ }
+
+ stashed_frames_.push_front(
+ {.unwrapped_tl0 = unwrapped_tl0, .frame = std::move(frame)});
+ }
+ }
+ }
+
+ RtpFrameReferenceFinder::ReturnVector res;
+ switch (decision) {
+ case kStash:
+ return res;
+ case kHandOff:
+ res.push_back(std::move(frame));
+ RetryStashedFrames(res);
+ return res;
+ case kDrop:
+ return res;
+ }
+
+ return res;
+}
+
+RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameFlexible(
+ RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& codec_header) {
+ if (codec_header.num_ref_pics > EncodedFrame::kMaxFrameReferences) {
+ return kDrop;
+ }
+
+ frame->num_references = codec_header.num_ref_pics;
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] =
+ Subtract<kFrameIdLength>(frame->Id(), codec_header.pid_diff[i]);
+ }
+
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+}
+
+RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameGof(
+ RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& codec_header,
+ int64_t unwrapped_tl0) {
+ GofInfo* info;
+ if (codec_header.ss_data_available) {
+ if (codec_header.temporal_idx != 0) {
+      RTC_LOG(LS_WARNING) << "Received scalability structure on a non-base "
+                             "layer frame. Scalability structure ignored.";
+ } else {
+ if (codec_header.gof.num_frames_in_gof > kMaxVp9FramesInGof) {
+ return kDrop;
+ }
+
+ for (size_t i = 0; i < codec_header.gof.num_frames_in_gof; ++i) {
+ if (codec_header.gof.num_ref_pics[i] > kMaxVp9RefPics) {
+ return kDrop;
+ }
+ }
+
+ GofInfoVP9 gof = codec_header.gof;
+ if (gof.num_frames_in_gof == 0) {
+        RTC_LOG(LS_WARNING) << "Number of frames in GOF is zero. Assuming "
+                               "the stream has only one temporal layer.";
+ gof.SetGofInfoVP9(kTemporalStructureMode1);
+ }
+
+ current_ss_idx_ = Add<kMaxGofSaved>(current_ss_idx_, 1);
+ scalability_structures_[current_ss_idx_] = gof;
+ scalability_structures_[current_ss_idx_].pid_start = frame->Id();
+ gof_info_.emplace(
+ unwrapped_tl0,
+ GofInfo(&scalability_structures_[current_ss_idx_], frame->Id()));
+ }
+
+ const auto gof_info_it = gof_info_.find(unwrapped_tl0);
+ if (gof_info_it == gof_info_.end())
+ return kStash;
+
+ info = &gof_info_it->second;
+
+ if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ frame->num_references = 0;
+ FrameReceivedVp9(frame->Id(), info);
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+ }
+ } else if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+ if (frame->SpatialIndex() == 0) {
+ RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";
+ return kDrop;
+ }
+ const auto gof_info_it = gof_info_.find(unwrapped_tl0);
+ if (gof_info_it == gof_info_.end())
+ return kStash;
+
+ info = &gof_info_it->second;
+
+ frame->num_references = 0;
+ FrameReceivedVp9(frame->Id(), info);
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+ } else {
+ auto gof_info_it = gof_info_.find(
+ (codec_header.temporal_idx == 0) ? unwrapped_tl0 - 1 : unwrapped_tl0);
+
+    // GOF info for this frame is not available yet; stash this frame.
+ if (gof_info_it == gof_info_.end())
+ return kStash;
+
+ if (codec_header.temporal_idx == 0) {
+ gof_info_it = gof_info_
+ .emplace(unwrapped_tl0,
+ GofInfo(gof_info_it->second.gof, frame->Id()))
+ .first;
+ }
+
+ info = &gof_info_it->second;
+ }
+
+ // Clean up info for base layers that are too old.
+ int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxGofSaved;
+ auto clean_gof_info_to = gof_info_.lower_bound(old_tl0_pic_idx);
+ gof_info_.erase(gof_info_.begin(), clean_gof_info_to);
+
+ FrameReceivedVp9(frame->Id(), info);
+
+ // Make sure we don't miss any frame that could potentially have the
+ // up switch flag set.
+ if (MissingRequiredFrameVp9(frame->Id(), *info))
+ return kStash;
+
+ if (codec_header.temporal_up_switch)
+ up_switch_.emplace(frame->Id(), codec_header.temporal_idx);
+
+ // Clean out old info about up switch frames.
+ uint16_t old_picture_id = Subtract<kFrameIdLength>(frame->Id(), 50);
+ auto up_switch_erase_to = up_switch_.lower_bound(old_picture_id);
+ up_switch_.erase(up_switch_.begin(), up_switch_erase_to);
+
+ size_t diff =
+ ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, frame->Id());
+ size_t gof_idx = diff % info->gof->num_frames_in_gof;
+
+ if (info->gof->num_ref_pics[gof_idx] > EncodedFrame::kMaxFrameReferences) {
+ return kDrop;
+ }
+ // Populate references according to the scalability structure.
+ frame->num_references = info->gof->num_ref_pics[gof_idx];
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] =
+ Subtract<kFrameIdLength>(frame->Id(), info->gof->pid_diff[gof_idx][i]);
+
+ // If this is a reference to a frame earlier than the last up switch point,
+ // then ignore this reference.
+ if (UpSwitchInIntervalVp9(frame->Id(), codec_header.temporal_idx,
+ frame->references[i])) {
+ --frame->num_references;
+ }
+ }
+
+ // Override GOF references.
+ if (!codec_header.inter_pic_predicted) {
+ frame->num_references = 0;
+ }
+
+ FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+ return kHandOff;
+}
+
+bool RtpVp9RefFinder::MissingRequiredFrameVp9(uint16_t picture_id,
+ const GofInfo& info) {
+ size_t diff =
+ ForwardDiff<uint16_t, kFrameIdLength>(info.gof->pid_start, picture_id);
+ size_t gof_idx = diff % info.gof->num_frames_in_gof;
+ size_t temporal_idx = info.gof->temporal_idx[gof_idx];
+
+ if (temporal_idx >= kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+ << " temporal "
+ "layers are supported.";
+ return true;
+ }
+
+ // For every reference this frame has, check if there is a frame missing in
+ // the interval (`ref_pid`, `picture_id`) in any of the lower temporal
+ // layers. If so, we are missing a required frame.
+ uint8_t num_references = info.gof->num_ref_pics[gof_idx];
+ for (size_t i = 0; i < num_references; ++i) {
+ uint16_t ref_pid =
+ Subtract<kFrameIdLength>(picture_id, info.gof->pid_diff[gof_idx][i]);
+ for (size_t l = 0; l < temporal_idx; ++l) {
+ auto missing_frame_it = missing_frames_for_layer_[l].lower_bound(ref_pid);
+ if (missing_frame_it != missing_frames_for_layer_[l].end() &&
+ AheadOf<uint16_t, kFrameIdLength>(picture_id, *missing_frame_it)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void RtpVp9RefFinder::FrameReceivedVp9(uint16_t picture_id, GofInfo* info) {
+ int last_picture_id = info->last_picture_id;
+ size_t gof_size = std::min(info->gof->num_frames_in_gof, kMaxVp9FramesInGof);
+
+ // If there is a gap, find which temporal layer the missing frames
+ // belong to and add the frame as missing for that temporal layer.
+ // Otherwise, remove this frame from the set of missing frames.
+ if (AheadOf<uint16_t, kFrameIdLength>(picture_id, last_picture_id)) {
+ size_t diff = ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start,
+ last_picture_id);
+ size_t gof_idx = diff % gof_size;
+
+ last_picture_id = Add<kFrameIdLength>(last_picture_id, 1);
+ while (last_picture_id != picture_id) {
+ gof_idx = (gof_idx + 1) % gof_size;
+ RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
+
+ size_t temporal_idx = info->gof->temporal_idx[gof_idx];
+ if (temporal_idx >= kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+ << " temporal "
+ "layers are supported.";
+ return;
+ }
+
+ missing_frames_for_layer_[temporal_idx].insert(last_picture_id);
+ last_picture_id = Add<kFrameIdLength>(last_picture_id, 1);
+ }
+
+ info->last_picture_id = last_picture_id;
+ } else {
+ size_t diff =
+ ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, picture_id);
+ size_t gof_idx = diff % gof_size;
+ RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
+
+ size_t temporal_idx = info->gof->temporal_idx[gof_idx];
+ if (temporal_idx >= kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+ << " temporal "
+ "layers are supported.";
+ return;
+ }
+
+ missing_frames_for_layer_[temporal_idx].erase(picture_id);
+ }
+}
+
+bool RtpVp9RefFinder::UpSwitchInIntervalVp9(uint16_t picture_id,
+ uint8_t temporal_idx,
+ uint16_t pid_ref) {
+ for (auto up_switch_it = up_switch_.upper_bound(pid_ref);
+ up_switch_it != up_switch_.end() &&
+ AheadOf<uint16_t, kFrameIdLength>(picture_id, up_switch_it->first);
+ ++up_switch_it) {
+ if (up_switch_it->second < temporal_idx)
+ return true;
+ }
+
+ return false;
+}
+
+void RtpVp9RefFinder::RetryStashedFrames(
+ RtpFrameReferenceFinder::ReturnVector& res) {
+ bool complete_frame = false;
+ do {
+ complete_frame = false;
+ for (auto it = stashed_frames_.begin(); it != stashed_frames_.end();) {
+ const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
+ it->frame->GetRtpVideoHeader().video_type_header);
+ RTC_DCHECK(!codec_header.flexible_mode);
+ FrameDecision decision =
+ ManageFrameGof(it->frame.get(), codec_header, it->unwrapped_tl0);
+
+ switch (decision) {
+ case kStash:
+ ++it;
+ break;
+ case kHandOff:
+ complete_frame = true;
+ res.push_back(std::move(it->frame));
+ [[fallthrough]];
+ case kDrop:
+ it = stashed_frames_.erase(it);
+ }
+ }
+ } while (complete_frame);
+}
+
+void RtpVp9RefFinder::FlattenFrameIdAndRefs(RtpFrameObject* frame,
+ bool inter_layer_predicted) {
+ for (size_t i = 0; i < frame->num_references; ++i) {
+ frame->references[i] =
+ unwrapper_.Unwrap(frame->references[i]) * kMaxSpatialLayers +
+ *frame->SpatialIndex();
+ }
+ frame->SetId(unwrapper_.Unwrap(frame->Id()) * kMaxSpatialLayers +
+ *frame->SpatialIndex());
+
+ if (inter_layer_predicted &&
+ frame->num_references + 1 <= EncodedFrame::kMaxFrameReferences) {
+ frame->references[frame->num_references] = frame->Id() - 1;
+ ++frame->num_references;
+ }
+}
+
+void RtpVp9RefFinder::ClearTo(uint16_t seq_num) {
+ auto it = stashed_frames_.begin();
+ while (it != stashed_frames_.end()) {
+ if (AheadOf<uint16_t>(seq_num, it->frame->first_seq_num())) {
+ it = stashed_frames_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace webrtc
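
The GoF indexing in ManageFrameGof reduces to a forward diff from pid_start
taken modulo the structure length, with references derived from the stored
pid_diff deltas. A standalone sketch with a hypothetical 4-frame structure
and made-up picture ids:

#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr int kFrameIdLength = 1 << 15;

// Forward distance in the 15-bit picture-id space.
uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return static_cast<uint16_t>(static_cast<uint16_t>(to - from) %
                               kFrameIdLength);
}

int main() {
  // A 4-frame GOF in a 0212 temporal pattern; one reference per position,
  // expressed as a picture-id delta, as GofInfoVP9::pid_diff stores them.
  const size_t kNumFramesInGof = 4;
  const uint8_t pid_diff[kNumFramesInGof] = {4, 1, 2, 1};

  uint16_t pid_start = 100;   // picture id where the structure arrived
  uint16_t picture_id = 106;  // frame being assigned references

  // Same indexing as ManageFrameGof: position inside the repeating pattern.
  size_t gof_idx = ForwardDiff(pid_start, picture_id) % kNumFramesInGof;
  uint16_t ref = static_cast<uint16_t>(
      (picture_id + kFrameIdLength - pid_diff[gof_idx]) % kFrameIdLength);
  std::cout << "gof_idx=" << gof_idx << " ref=" << ref << "\n";
  // prints gof_idx=2 ref=104
}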
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h
new file mode 100644
index 0000000000..ea5e319fc8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class RtpVp9RefFinder {
+ public:
+ RtpVp9RefFinder() = default;
+
+ RtpFrameReferenceFinder::ReturnVector ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame);
+ void ClearTo(uint16_t seq_num);
+
+ private:
+ static constexpr int kFrameIdLength = 1 << 15;
+ static constexpr int kMaxGofSaved = 50;
+ static constexpr int kMaxLayerInfo = 50;
+ static constexpr int kMaxNotYetReceivedFrames = 100;
+ static constexpr int kMaxStashedFrames = 100;
+ static constexpr int kMaxTemporalLayers = 5;
+
+ enum FrameDecision { kStash, kHandOff, kDrop };
+
+ struct GofInfo {
+ GofInfo(GofInfoVP9* gof, uint16_t last_picture_id)
+ : gof(gof), last_picture_id(last_picture_id) {}
+ GofInfoVP9* gof;
+ uint16_t last_picture_id;
+ };
+
+ struct UnwrappedTl0Frame {
+ int64_t unwrapped_tl0;
+ std::unique_ptr<RtpFrameObject> frame;
+ };
+
+ FrameDecision ManageFrameFlexible(RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& vp9_header);
+ FrameDecision ManageFrameGof(RtpFrameObject* frame,
+ const RTPVideoHeaderVP9& vp9_header,
+ int64_t unwrapped_tl0);
+ void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+
+ bool MissingRequiredFrameVp9(uint16_t picture_id, const GofInfo& info);
+
+ void FrameReceivedVp9(uint16_t picture_id, GofInfo* info);
+ bool UpSwitchInIntervalVp9(uint16_t picture_id,
+ uint8_t temporal_idx,
+ uint16_t pid_ref);
+
+ void FlattenFrameIdAndRefs(RtpFrameObject* frame, bool inter_layer_predicted);
+
+ // Frames that have been fully received but didn't have all the information
+ // needed to determine their references.
+ std::deque<UnwrappedTl0Frame> stashed_frames_;
+
+  // Index of the current scalability structure in the
+  // `scalability_structures_` array.
+ uint8_t current_ss_idx_ = 0;
+
+ // Holds received scalability structures.
+ std::array<GofInfoVP9, kMaxGofSaved> scalability_structures_;
+
+  // Holds the GOF information for a given unwrapped TL0 picture index.
+ std::map<int64_t, GofInfo> gof_info_;
+
+ // Keep track of which picture id and which temporal layer that had the
+ // up switch flag set.
+ std::map<uint16_t, uint8_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>
+ up_switch_;
+
+  // For every temporal layer, keep a set of which frames are missing.
+ std::array<std::set<uint16_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>,
+ kMaxTemporalLayers>
+ missing_frames_for_layer_;
+
+ // Unwrapper used to unwrap VP8/VP9 streams which have their picture id
+ // specified.
+ SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+
+ SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
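
FlattenFrameIdAndRefs packs each (unwrapped picture id, spatial layer) pair
into a single ordered 64-bit id, which is what makes the inter-layer
reference simply id - 1. A sketch of that mapping; kMaxSpatialLayers = 5
matches the upstream VP9 headers but is restated here as an assumption:

#include <cstdint>
#include <iostream>

// Assumed value of kMaxSpatialLayers from the VP9 RTP headers.
constexpr int kMaxSpatialLayers = 5;

// Maps (unwrapped picture id, spatial layer) to one strictly ordered id.
int64_t Flatten(int64_t unwrapped_pid, int spatial_idx) {
  return unwrapped_pid * kMaxSpatialLayers + spatial_idx;
}

int main() {
  int64_t base = Flatten(1000, /*spatial_idx=*/0);
  int64_t enhancement = Flatten(1000, /*spatial_idx=*/1);
  std::cout << enhancement - base << "\n";  // 1: inter-layer ref is id - 1
}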
diff --git a/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
new file mode 100644
index 0000000000..51fae50902
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Contains;
+using ::testing::Matcher;
+using ::testing::MatcherInterface;
+using ::testing::Matches;
+using ::testing::MatchResultListener;
+using ::testing::Pointee;
+using ::testing::Property;
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAreArray;
+
+namespace webrtc {
+
+namespace {
+class Frame {
+ public:
+ Frame& SeqNum(uint16_t start, uint16_t end) {
+ seq_num_start = start;
+ seq_num_end = end;
+ return *this;
+ }
+
+ Frame& AsKeyFrame(bool is_keyframe = true) {
+ keyframe = is_keyframe;
+ return *this;
+ }
+
+ Frame& Pid(int pid) {
+ picture_id = pid;
+ return *this;
+ }
+
+ Frame& SidAndTid(int sid, int tid) {
+ spatial_id = sid;
+ temporal_id = tid;
+ return *this;
+ }
+
+ Frame& Tl0(int tl0) {
+ tl0_idx = tl0;
+ return *this;
+ }
+
+ Frame& AsUpswitch(bool is_up = true) {
+ up_switch = is_up;
+ return *this;
+ }
+
+ Frame& AsInterLayer(bool is_inter_layer = true) {
+ inter_layer = is_inter_layer;
+ return *this;
+ }
+
+ Frame& NotAsInterPic(bool is_inter_pic = false) {
+ inter_pic = is_inter_pic;
+ return *this;
+ }
+
+ Frame& Gof(GofInfoVP9* ss) {
+ scalability_structure = ss;
+ return *this;
+ }
+
+ Frame& FlexRefs(const std::vector<uint8_t>& refs) {
+ flex_refs = refs;
+ return *this;
+ }
+
+ operator std::unique_ptr<RtpFrameObject>() {
+ RTPVideoHeaderVP9 vp9_header{};
+ vp9_header.picture_id = *picture_id;
+ vp9_header.temporal_idx = *temporal_id;
+ vp9_header.spatial_idx = *spatial_id;
+ if (tl0_idx.has_value()) {
+ RTC_DCHECK(flex_refs.empty());
+ vp9_header.flexible_mode = false;
+ vp9_header.tl0_pic_idx = *tl0_idx;
+ } else {
+ vp9_header.flexible_mode = true;
+ vp9_header.num_ref_pics = flex_refs.size();
+ for (size_t i = 0; i < flex_refs.size(); ++i) {
+ vp9_header.pid_diff[i] = flex_refs.at(i);
+ }
+ }
+ vp9_header.temporal_up_switch = up_switch;
+ vp9_header.inter_layer_predicted = inter_layer;
+ vp9_header.inter_pic_predicted = inter_pic && !keyframe;
+ if (scalability_structure != nullptr) {
+ vp9_header.ss_data_available = true;
+ vp9_header.gof = *scalability_structure;
+ }
+
+ RTPVideoHeader video_header;
+ video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ video_header.video_type_header = vp9_header;
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ seq_num_start,
+ seq_num_end,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ kVideoCodecVP9,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+ }
+
+ private:
+ uint16_t seq_num_start = 0;
+ uint16_t seq_num_end = 0;
+ bool keyframe = false;
+ absl::optional<int> picture_id;
+ absl::optional<int> spatial_id;
+ absl::optional<int> temporal_id;
+ absl::optional<int> tl0_idx;
+ bool up_switch = false;
+ bool inter_layer = false;
+ bool inter_pic = true;
+ GofInfoVP9* scalability_structure = nullptr;
+ std::vector<uint8_t> flex_refs;
+};
+
+using FrameVector = std::vector<std::unique_ptr<EncodedFrame>>;
+
+// Would have been nice to use the MATCHER_P3 macro instead, but when used it
+// fails to infer the type of the vector if not explicitly given in the
+// template argument list.
+class HasFrameMatcher : public MatcherInterface<const FrameVector&> {
+ public:
+ explicit HasFrameMatcher(int64_t frame_id,
+ const std::vector<int64_t>& expected_refs)
+ : frame_id_(frame_id),
+ expected_refs_(expected_refs) {}
+
+ bool MatchAndExplain(const FrameVector& frames,
+ MatchResultListener* result_listener) const override {
+ auto it = std::find_if(frames.begin(), frames.end(),
+ [this](const std::unique_ptr<EncodedFrame>& f) {
+ return f->Id() == frame_id_;
+ });
+ if (it == frames.end()) {
+ if (result_listener->IsInterested()) {
+ *result_listener << "No frame with frame_id:" << frame_id_;
+ }
+ return false;
+ }
+
+ rtc::ArrayView<int64_t> actual_refs((*it)->references,
+ (*it)->num_references);
+ if (!Matches(UnorderedElementsAreArray(expected_refs_))(actual_refs)) {
+ if (result_listener->IsInterested()) {
+ *result_listener << "Frame with frame_id:" << frame_id_ << " and "
+ << actual_refs.size() << " references { ";
+ for (auto r : actual_refs) {
+ *result_listener << r << " ";
+ }
+ *result_listener << "}";
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "frame with frame_id:" << frame_id_ << " and "
+ << expected_refs_.size() << " references { ";
+ for (auto r : expected_refs_) {
+ *os << r << " ";
+ }
+ *os << "}";
+ }
+
+ private:
+ const int64_t frame_id_;
+ const std::vector<int64_t> expected_refs_;
+};
+
+} // namespace
+
+class RtpVp9RefFinderTest : public ::testing::Test {
+ protected:
+ RtpVp9RefFinderTest() : ref_finder_(std::make_unique<RtpVp9RefFinder>()) {}
+
+ void Insert(std::unique_ptr<RtpFrameObject> frame) {
+ for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
+ frames_.push_back(std::move(f));
+ }
+ }
+
+ std::unique_ptr<RtpVp9RefFinder> ref_finder_;
+ FrameVector frames_;
+};
+
+Matcher<const FrameVector&> HasFrameWithIdAndRefs(int64_t frame_id,
+ std::vector<int64_t> refs) {
+ return MakeMatcher(new HasFrameMatcher(frame_id, refs));
+}
+
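+// Note: RtpVp9RefFinder emits flattened frame ids of the form
+// (unwrapped picture id) * 5 + spatial id, which is why e.g. Pid(1) with
+// spatial id 0 is expected below as frame id 5.
+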
+TEST_F(RtpVp9RefFinderTest, GofInsertOneFrame) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+
+ EXPECT_EQ(frames_.size(), 1UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_0) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer.
+
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+
+ EXPECT_EQ(frames_.size(), 2UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSpatialLayers_2) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer.
+
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(2).SidAndTid(1, 0).Tl0(1).NotAsInterPic());
+ Insert(Frame().Pid(3).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).Tl0(2));
+
+ EXPECT_EQ(frames_.size(), 5UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_0) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer.
+
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(2).SidAndTid(1, 0).Tl0(1).NotAsInterPic());
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(3).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).Tl0(2));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(5).SidAndTid(1, 0).Tl0(4));
+ Insert(Frame().Pid(4).SidAndTid(1, 0).Tl0(3));
+ Insert(Frame().Pid(5).SidAndTid(0, 0).Tl0(4));
+
+ EXPECT_EQ(frames_.size(), 9UL);
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {15}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(26, {21}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSkipFramesTemporalLayers_01) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ // Skip GOF with tl0 1
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(5).SidAndTid(0, 1).Tl0(2));
+ // Skip GOF with tl0 3
+ // Skip GOF with tl0 4
+ Insert(Frame().Pid(10).SidAndTid(0, 0).Tl0(5).Gof(&ss));
+ Insert(Frame().Pid(11).SidAndTid(0, 1).Tl0(5));
+
+ ASSERT_EQ(6UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSkipFramesTemporalLayers_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 02120212 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+
+ ASSERT_EQ(4UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+
+ // Skip frames with tl0 = 1
+
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+
+ // Now insert frames with tl0 = 1
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1).AsKeyFrame().Gof(&ss));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+
+ ASSERT_EQ(9UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {}));
+
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1));
+
+ ASSERT_EQ(12UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_01) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1));
+
+ ASSERT_EQ(4UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_01) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern
+
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 1).Tl0(2));
+ Insert(Frame().Pid(7).SidAndTid(0, 1).Tl0(3));
+ Insert(Frame().Pid(6).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(4));
+ Insert(Frame().Pid(9).SidAndTid(0, 1).Tl0(4));
+
+ ASSERT_EQ(10UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern
+
+ Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2));
+
+ ASSERT_EQ(12UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_01_0212) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern
+
+ Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1));
+ Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(2));
+ ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern
+ Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2).Gof(&ss));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+ Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(3));
+ Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(3));
+ Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(2));
+ Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(3));
+ Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(3));
+
+ ASSERT_EQ(12UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50}));
+}
+
+TEST_F(RtpVp9RefFinderTest, FlexibleModeOneFrame) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+
+ ASSERT_EQ(1UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, FlexibleModeTwoSpatialLayers) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+ Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame().AsInterLayer());
+ Insert(Frame().Pid(1).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).FlexRefs({2}));
+ Insert(Frame().Pid(2).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).FlexRefs({2}));
+ Insert(Frame().Pid(4).SidAndTid(1, 0).FlexRefs({1}));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {6}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16}));
+}
+
+TEST_F(RtpVp9RefFinderTest, FlexibleModeTwoSpatialLayersReordered) {
+ Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame().AsInterLayer());
+ Insert(Frame().Pid(1).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+ Insert(Frame().Pid(2).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(3).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(2).SidAndTid(0, 0).FlexRefs({2}));
+ Insert(Frame().Pid(4).SidAndTid(1, 0).FlexRefs({1}));
+ Insert(Frame().Pid(4).SidAndTid(0, 0).FlexRefs({2}));
+
+ ASSERT_EQ(8UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {1}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {6}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16}));
+}
+
+TEST_F(RtpVp9RefFinderTest, WrappingFlexReference) {
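+ // A pid_diff of 1 from picture id 0 references picture id -1, i.e. the
+ // reference wraps backwards. With frame ids flattened by a factor of 5,
+ // the gap between the frame id and its reference becomes 5.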
+ Insert(Frame().Pid(0).SidAndTid(0, 0).FlexRefs({1}));
+
+ ASSERT_EQ(1UL, frames_.size());
+ const EncodedFrame& frame = *frames_[0];
+
+ ASSERT_EQ(frame.Id() - frame.references[0], 5);
+}
+
+TEST_F(RtpVp9RefFinderTest, GofPidJump) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3);
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1000).SidAndTid(0, 0).Tl0(1));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTl0Jump) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode3);
+
+ Insert(Frame()
+ .Pid(0)
+ .SidAndTid(0, 0)
+ .Tl0(125)
+ .AsUpswitch()
+ .AsKeyFrame()
+ .NotAsInterPic()
+ .Gof(&ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).Gof(&ss));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTidTooHigh) {
+ const int kMaxTemporalLayers = 5;
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode2);
+ ss.temporal_idx[1] = kMaxTemporalLayers;
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(1));
+
+ ASSERT_EQ(1UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofZeroFrames) {
+ GofInfoVP9 ss;
+ ss.num_frames_in_gof = 0;
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof(
+ &ss));
+ Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(1));
+
+ ASSERT_EQ(2UL, frames_.size());
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+ EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0}));
+}
+
+TEST_F(RtpVp9RefFinderTest, SpatialIndex) {
+ Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame());
+ Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame());
+ Insert(Frame().Pid(0).SidAndTid(2, 0).AsKeyFrame());
+
+ ASSERT_EQ(3UL, frames_.size());
+ EXPECT_THAT(frames_,
+ Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 0))));
+ EXPECT_THAT(frames_,
+ Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 1))));
+ EXPECT_THAT(frames_,
+ Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 2))));
+}
+
+TEST_F(RtpVp9RefFinderTest, StashedFramesDoNotWrapTl0Backwards) {
+ GofInfoVP9 ss;
+ ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+ Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0));
+ EXPECT_THAT(frames_, SizeIs(0));
+
+ Insert(Frame().Pid(128).SidAndTid(0, 0).Tl0(128).AsKeyFrame().Gof(&ss));
+ EXPECT_THAT(frames_, SizeIs(1));
+ Insert(Frame().Pid(129).SidAndTid(0, 0).Tl0(129));
+ EXPECT_THAT(frames_, SizeIs(2));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/session_info.cc b/third_party/libwebrtc/modules/video_coding/session_info.cc
new file mode 100644
index 0000000000..e31b8b1d25
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/session_info.cc
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/session_info.h"
+
+#include <string.h>
+
+#include <vector>
+
+#include "absl/types/variant.h"
+#include "modules/include/module_common_types.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/jitter_buffer_common.h"
+#include "modules/video_coding/packet.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
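+// Reads a 16-bit big-endian value; e.g. {0x01, 0x2C} yields 0x012C == 300.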
+uint16_t BufferToUWord16(const uint8_t* dataBuffer) {
+ return (dataBuffer[0] << 8) | dataBuffer[1];
+}
+
+} // namespace
+
+VCMSessionInfo::VCMSessionInfo()
+ : complete_(false),
+ frame_type_(VideoFrameType::kVideoFrameDelta),
+ packets_(),
+ empty_seq_num_low_(-1),
+ empty_seq_num_high_(-1),
+ first_packet_seq_num_(-1),
+ last_packet_seq_num_(-1) {}
+
+VCMSessionInfo::~VCMSessionInfo() {}
+
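+// Rebases each packet's data pointer after the backing frame buffer has
+// moved: every pointer keeps its offset relative to the new base pointer.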
+void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
+ const uint8_t* new_base_ptr) {
+ for (PacketIterator it = packets_.begin(); it != packets_.end(); ++it)
+ if ((*it).dataPtr != NULL) {
+ RTC_DCHECK(old_base_ptr != NULL && new_base_ptr != NULL);
+ (*it).dataPtr = new_base_ptr + ((*it).dataPtr - old_base_ptr);
+ }
+}
+
+int VCMSessionInfo::LowSequenceNumber() const {
+ if (packets_.empty())
+ return empty_seq_num_low_;
+ return packets_.front().seqNum;
+}
+
+int VCMSessionInfo::HighSequenceNumber() const {
+ if (packets_.empty())
+ return empty_seq_num_high_;
+ if (empty_seq_num_high_ == -1)
+ return packets_.back().seqNum;
+ return LatestSequenceNumber(packets_.back().seqNum, empty_seq_num_high_);
+}
+
+int VCMSessionInfo::PictureId() const {
+ if (packets_.empty())
+ return kNoPictureId;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .pictureId;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .picture_id;
+ } else {
+ return kNoPictureId;
+ }
+}
+
+int VCMSessionInfo::TemporalId() const {
+ if (packets_.empty())
+ return kNoTemporalIdx;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .temporalIdx;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .temporal_idx;
+ } else {
+ return kNoTemporalIdx;
+ }
+}
+
+bool VCMSessionInfo::LayerSync() const {
+ if (packets_.empty())
+ return false;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .layerSync;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .temporal_up_switch;
+ } else {
+ return false;
+ }
+}
+
+int VCMSessionInfo::Tl0PicId() const {
+ if (packets_.empty())
+ return kNoTl0PicIdx;
+ if (packets_.front().video_header.codec == kVideoCodecVP8) {
+ return absl::get<RTPVideoHeaderVP8>(
+ packets_.front().video_header.video_type_header)
+ .tl0PicIdx;
+ } else if (packets_.front().video_header.codec == kVideoCodecVP9) {
+ return absl::get<RTPVideoHeaderVP9>(
+ packets_.front().video_header.video_type_header)
+ .tl0_pic_idx;
+ } else {
+ return kNoTl0PicIdx;
+ }
+}
+
+std::vector<NaluInfo> VCMSessionInfo::GetNaluInfos() const {
+ if (packets_.empty() ||
+ packets_.front().video_header.codec != kVideoCodecH264)
+ return std::vector<NaluInfo>();
+ std::vector<NaluInfo> nalu_infos;
+ for (const VCMPacket& packet : packets_) {
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ for (size_t i = 0; i < h264.nalus_length; ++i) {
+ nalu_infos.push_back(h264.nalus[i]);
+ }
+ }
+ return nalu_infos;
+}
+
+void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ if (packets_.empty())
+ return;
+
+ auto* vp9_header = absl::get_if<RTPVideoHeaderVP9>(
+ &packets_.front().video_header.video_type_header);
+ if (!vp9_header || vp9_header->flexible_mode)
+ return;
+
+ vp9_header->temporal_idx = gof_info.temporal_idx[idx];
+ vp9_header->temporal_up_switch = gof_info.temporal_up_switch[idx];
+ vp9_header->num_ref_pics = gof_info.num_ref_pics[idx];
+ for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+ vp9_header->pid_diff[i] = gof_info.pid_diff[idx][i];
+ }
+}
+
+void VCMSessionInfo::Reset() {
+ complete_ = false;
+ frame_type_ = VideoFrameType::kVideoFrameDelta;
+ packets_.clear();
+ empty_seq_num_low_ = -1;
+ empty_seq_num_high_ = -1;
+ first_packet_seq_num_ = -1;
+ last_packet_seq_num_ = -1;
+}
+
+size_t VCMSessionInfo::SessionLength() const {
+ size_t length = 0;
+ for (PacketIteratorConst it = packets_.begin(); it != packets_.end(); ++it)
+ length += (*it).sizeBytes;
+ return length;
+}
+
+int VCMSessionInfo::NumPackets() const {
+ return packets_.size();
+}
+
+size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
+ PacketIterator packet_it) {
+ VCMPacket& packet = *packet_it;
+ PacketIterator it;
+
+ // Calculate the offset into the frame buffer for this packet.
+ size_t offset = 0;
+ for (it = packets_.begin(); it != packet_it; ++it)
+ offset += (*it).sizeBytes;
+
+ // Set the data pointer to point to the start of this packet in the
+ // frame buffer.
+ const uint8_t* packet_buffer = packet.dataPtr;
+ packet.dataPtr = frame_buffer + offset;
+
+ // We handle H.264 STAP-A packets in a special way as we need to remove the
+ // two length bytes between each NAL unit, and potentially add start codes.
+ // TODO(pbos): Remove H264 parsing from this step and use a fragmentation
+ // header supplied by the H264 depacketizer.
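+ // A STAP-A payload (RFC 6184) is laid out as:
+ //   [STAP-A NAL header (1 byte)]
+ //   [NALU 1 size (2 bytes)][NALU 1 data]
+ //   [NALU 2 size (2 bytes)][NALU 2 data] ...
+ // Each 2-byte size field is dropped and, when `insertStartCode` is set,
+ // replaced by a 4-byte Annex B start code.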
+ const size_t kH264NALHeaderLengthInBytes = 1;
+ const size_t kLengthFieldLength = 2;
+ const auto* h264 =
+ absl::get_if<RTPVideoHeaderH264>(&packet.video_header.video_type_header);
+ if (h264 && h264->packetization_type == kH264StapA) {
+ size_t required_length = 0;
+ const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
+ // Must check that incoming data length doesn't extend past the end of the
+ // buffer. Start codes are longer than the length fields they replace, so
+ // the reassembled data may need more room than the original payload.
+ while (nalu_ptr + kLengthFieldLength <= packet_buffer + packet.sizeBytes) {
+ size_t length = BufferToUWord16(nalu_ptr);
+ if (nalu_ptr + kLengthFieldLength + length <=
+ packet_buffer + packet.sizeBytes) {
+ required_length +=
+ length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ nalu_ptr += kLengthFieldLength + length;
+ } else {
+ // Something is very wrong!
+ RTC_LOG(LS_ERROR) << "Failed to insert packet due to corrupt H264 STAP-A";
+ return 0;
+ }
+ }
+ ShiftSubsequentPackets(packet_it, required_length);
+ nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
+ uint8_t* frame_buffer_ptr = frame_buffer + offset;
+ // We already know we won't go past the end of the buffer.
+ while (nalu_ptr + kLengthFieldLength <= packet_buffer + packet.sizeBytes) {
+ size_t length = BufferToUWord16(nalu_ptr);
+ nalu_ptr += kLengthFieldLength;
+ frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
+ const_cast<uint8_t*>(frame_buffer_ptr));
+ nalu_ptr += length;
+ }
+ packet.sizeBytes = required_length;
+ return packet.sizeBytes;
+ }
+ ShiftSubsequentPackets(
+ packet_it, packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
+
+ packet.sizeBytes =
+ Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
+ const_cast<uint8_t*>(packet.dataPtr));
+ return packet.sizeBytes;
+}
+
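+// Copies `length` bytes from `buffer` into `frame_buffer`, optionally
+// prepending the 4-byte Annex B start code (00 00 00 01). Returns the number
+// of bytes written.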
+size_t VCMSessionInfo::Insert(const uint8_t* buffer,
+ size_t length,
+ bool insert_start_code,
+ uint8_t* frame_buffer) {
+ if (!buffer || !frame_buffer) {
+ return 0;
+ }
+ if (insert_start_code) {
+ const unsigned char startCode[] = {0, 0, 0, 1};
+ memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
+ }
+ memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
+ buffer, length);
+ length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
+
+ return length;
+}
+
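+// Shifts the payloads of all packets following `it` by `steps_to_shift` bytes
+// within the shared frame buffer and updates their data pointers. A negative
+// shift compacts the buffer after packet data has been deleted.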
+void VCMSessionInfo::ShiftSubsequentPackets(PacketIterator it,
+ int steps_to_shift) {
+ ++it;
+ if (it == packets_.end())
+ return;
+ uint8_t* first_packet_ptr = const_cast<uint8_t*>((*it).dataPtr);
+ int shift_length = 0;
+ // Calculate the total move length and move the data pointers in advance.
+ for (; it != packets_.end(); ++it) {
+ shift_length += (*it).sizeBytes;
+ if ((*it).dataPtr != NULL)
+ (*it).dataPtr += steps_to_shift;
+ }
+ memmove(first_packet_ptr + steps_to_shift, first_packet_ptr, shift_length);
+}
+
+void VCMSessionInfo::UpdateCompleteSession() {
+ if (HaveFirstPacket() && HaveLastPacket()) {
+ // Do we have all the packets in this session?
+ bool complete_session = true;
+ PacketIterator it = packets_.begin();
+ PacketIterator prev_it = it;
+ ++it;
+ for (; it != packets_.end(); ++it) {
+ if (!InSequence(it, prev_it)) {
+ complete_session = false;
+ break;
+ }
+ prev_it = it;
+ }
+ complete_ = complete_session;
+ }
+}
+
+bool VCMSessionInfo::complete() const {
+ return complete_;
+}
+
+// Find the end of the NAL unit which the packet pointed to by `packet_it`
+// belongs to. Returns an iterator to the last packet of the frame if the end
+// of the NAL unit wasn't found.
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
+ PacketIterator packet_it) const {
+ if ((*packet_it).completeNALU == kNaluEnd ||
+ (*packet_it).completeNALU == kNaluComplete) {
+ return packet_it;
+ }
+ // Find the end of the NAL unit.
+ for (; packet_it != packets_.end(); ++packet_it) {
+ if (((*packet_it).completeNALU == kNaluComplete &&
+ (*packet_it).sizeBytes > 0) ||
+ // Found next NALU.
+ (*packet_it).completeNALU == kNaluStart)
+ return --packet_it;
+ if ((*packet_it).completeNALU == kNaluEnd)
+ return packet_it;
+ }
+ // The end wasn't found.
+ return --packet_it;
+}
+
+size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
+ PacketIterator end) {
+ size_t bytes_to_delete = 0; // The number of bytes to delete.
+ PacketIterator packet_after_end = end;
+ ++packet_after_end;
+
+ // Get the number of bytes to delete.
+ // Clear the size of these packets.
+ for (PacketIterator it = start; it != packet_after_end; ++it) {
+ bytes_to_delete += (*it).sizeBytes;
+ (*it).sizeBytes = 0;
+ (*it).dataPtr = NULL;
+ }
+ if (bytes_to_delete > 0)
+ ShiftSubsequentPackets(end, -static_cast<int>(bytes_to_delete));
+ return bytes_to_delete;
+}
+
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
+ PacketIterator it) const {
+ while (it != packets_.end()) {
+ if (absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .beginningOfPartition) {
+ return it;
+ }
+ ++it;
+ }
+ return it;
+}
+
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
+ PacketIterator it) const {
+ RTC_DCHECK_EQ((*it).codec(), kVideoCodecVP8);
+ PacketIterator prev_it = it;
+ const int partition_id =
+ absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .partitionId;
+ while (it != packets_.end()) {
+ bool beginning =
+ absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .beginningOfPartition;
+ int current_partition_id =
+ absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
+ .partitionId;
+ bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
+ if (packet_loss_found ||
+ (beginning && current_partition_id != partition_id)) {
+ // Missing packet, the previous packet was the last in sequence.
+ return prev_it;
+ }
+ prev_it = it;
+ ++it;
+ }
+ return prev_it;
+}
+
+bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
+ const PacketIterator& prev_packet_it) {
+ // If the two iterators are pointing to the same packet they are considered
+ // to be in sequence.
+ return (packet_it == prev_packet_it ||
+ (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
+ (*packet_it).seqNum));
+}
+
+size_t VCMSessionInfo::MakeDecodable() {
+ size_t return_length = 0;
+ if (packets_.empty()) {
+ return 0;
+ }
+ PacketIterator it = packets_.begin();
+ // Make sure we remove the first NAL unit if it's not decodable.
+ if ((*it).completeNALU == kNaluIncomplete || (*it).completeNALU == kNaluEnd) {
+ PacketIterator nalu_end = FindNaluEnd(it);
+ return_length += DeletePacketData(it, nalu_end);
+ it = nalu_end;
+ }
+ PacketIterator prev_it = it;
+ // Take care of the rest of the NAL units.
+ for (; it != packets_.end(); ++it) {
+ bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
+ (*it).completeNALU == kNaluComplete);
+ if (!start_of_nalu && !InSequence(it, prev_it)) {
+ // Found a sequence number gap due to packet loss.
+ PacketIterator nalu_end = FindNaluEnd(it);
+ return_length += DeletePacketData(it, nalu_end);
+ it = nalu_end;
+ }
+ prev_it = it;
+ }
+ return return_length;
+}
+
+bool VCMSessionInfo::HaveFirstPacket() const {
+ return !packets_.empty() && (first_packet_seq_num_ != -1);
+}
+
+bool VCMSessionInfo::HaveLastPacket() const {
+ return !packets_.empty() && (last_packet_seq_num_ != -1);
+}
+
+int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
+ uint8_t* frame_buffer,
+ const FrameData& frame_data) {
+ if (packet.video_header.frame_type == VideoFrameType::kEmptyFrame) {
+ // Update sequence number of an empty packet.
+ // Only media packets are inserted into the packet list.
+ InformOfEmptyPacket(packet.seqNum);
+ return 0;
+ }
+
+ if (packets_.size() == kMaxPacketsInSession) {
+ RTC_LOG(LS_ERROR) << "Max number of packets per frame has been reached.";
+ return -1;
+ }
+
+ // Find the position of this packet in the packet list in sequence number
+ // order and insert it. Loop over the list in reverse order.
+ ReversePacketIterator rit = packets_.rbegin();
+ for (; rit != packets_.rend(); ++rit)
+ if (LatestSequenceNumber(packet.seqNum, (*rit).seqNum) == packet.seqNum)
+ break;
+
+ // Check for duplicate packets.
+ if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
+ (*rit).sizeBytes > 0)
+ return -2;
+
+ if (packet.codec() == kVideoCodecH264) {
+ // H.264 can have leading or trailing non-VCL (Video Coding Layer)
+ // NALUs, such as SPS/PPS/SEI and others. Also, the RTP marker bit is
+ // not reliable for the last packet of a frame (RFC 6184 5.1 - "Decoders
+ // [] MUST NOT rely on this property"), so allow out-of-order packets to
+ // update the first and last seq# range. Also mark as a key frame if
+ // any packet is of that type.
+ if (frame_type_ != VideoFrameType::kVideoFrameKey) {
+ frame_type_ = packet.video_header.frame_type;
+ }
+ if (packet.is_first_packet_in_frame() &&
+ (first_packet_seq_num_ == -1 ||
+ IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum))) {
+ first_packet_seq_num_ = packet.seqNum;
+ }
+ // Note: the code does *not* currently handle the Marker bit being totally
+ // absent from a frame. It does not, however, depend on it being on the last
+ // packet of the 'frame'/'session'.
+ if (packet.markerBit &&
+ (last_packet_seq_num_ == -1 ||
+ IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_))) {
+ last_packet_seq_num_ = packet.seqNum;
+ }
+ } else {
+ // Only insert media packets between the first and last packets (when
+ // available). The check is placed here so that duplicate packets are
+ // properly accounted for.
+ // Check if this is the first packet of the frame (only valid for some
+ // codecs); it should only be set for one packet per session.
+ if (packet.is_first_packet_in_frame() && first_packet_seq_num_ == -1) {
+ // The first packet in a frame signals the frame type.
+ frame_type_ = packet.video_header.frame_type;
+ // Store the sequence number for the first packet.
+ first_packet_seq_num_ = static_cast<int>(packet.seqNum);
+ } else if (first_packet_seq_num_ != -1 &&
+ IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum)) {
+ RTC_LOG(LS_WARNING)
+ << "Received packet with a sequence number which is out "
+ "of frame boundaries";
+ return -3;
+ } else if (frame_type_ == VideoFrameType::kEmptyFrame &&
+ packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
+ // Update the frame type with the type of the first media packet.
+ // TODO(mikhal): Can this trigger?
+ frame_type_ = packet.video_header.frame_type;
+ }
+
+ // Track the marker bit, should only be set for one packet per session.
+ if (packet.markerBit && last_packet_seq_num_ == -1) {
+ last_packet_seq_num_ = static_cast<int>(packet.seqNum);
+ } else if (last_packet_seq_num_ != -1 &&
+ IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
+ RTC_LOG(LS_WARNING)
+ << "Received packet with a sequence number which is out "
+ "of frame boundaries";
+ return -3;
+ }
+ }
+
+ // The insert operation invalidates the iterator `rit`.
+ PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
+
+ size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
+ UpdateCompleteSession();
+ return static_cast<int>(returnLength);
+}
+
+void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
+ // Empty packets may be FEC or filler packets. They are sequential and
+ // follow the data packets; therefore, we only need to keep track of the
+ // high and low sequence numbers and may assume that the packets in
+ // between are empty packets belonging to the same frame (timestamp).
+ if (empty_seq_num_high_ == -1)
+ empty_seq_num_high_ = seq_num;
+ else
+ empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
+ if (empty_seq_num_low_ == -1 ||
+ IsNewerSequenceNumber(empty_seq_num_low_, seq_num))
+ empty_seq_num_low_ = seq_num;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/session_info.h b/third_party/libwebrtc/modules/video_coding/session_info.h
new file mode 100644
index 0000000000..6079dbbb72
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/session_info.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_SESSION_INFO_H_
+#define MODULES_VIDEO_CODING_SESSION_INFO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <vector>
+
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/packet.h"
+
+namespace webrtc {
+// Used to pass data from jitter buffer to session info.
+// This data is then used in determining whether a frame is decodable.
+struct FrameData {
+ int64_t rtt_ms;
+ float rolling_average_packets_per_frame;
+};
+
+class VCMSessionInfo {
+ public:
+ VCMSessionInfo();
+ ~VCMSessionInfo();
+
+ void UpdateDataPointers(const uint8_t* old_base_ptr,
+ const uint8_t* new_base_ptr);
+ void Reset();
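+ // Inserts `packet` into the session and copies its payload into
+ // `frame_buffer`. Returns the number of bytes inserted on success, or a
+ // negative value on error (-1: frame full, -2: duplicate packet,
+ // -3: packet outside the frame's sequence number bounds).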
+ int InsertPacket(const VCMPacket& packet,
+ uint8_t* frame_buffer,
+ const FrameData& frame_data);
+ bool complete() const;
+
+ // Makes the frame decodable, i.e., leaves only decodable NALUs. All
+ // non-decodable NALUs are deleted and the remaining packet data is moved
+ // in memory to remove any empty space.
+ // Returns the number of bytes deleted from the session.
+ size_t MakeDecodable();
+
+ size_t SessionLength() const;
+ int NumPackets() const;
+ bool HaveFirstPacket() const;
+ bool HaveLastPacket() const;
+ webrtc::VideoFrameType FrameType() const { return frame_type_; }
+ int LowSequenceNumber() const;
+
+ // Returns highest sequence number, media or empty.
+ int HighSequenceNumber() const;
+ int PictureId() const;
+ int TemporalId() const;
+ bool LayerSync() const;
+ int Tl0PicId() const;
+
+ std::vector<NaluInfo> GetNaluInfos() const;
+
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
+ private:
+ enum { kMaxVP8Partitions = 9 };
+
+ typedef std::list<VCMPacket> PacketList;
+ typedef PacketList::iterator PacketIterator;
+ typedef PacketList::const_iterator PacketIteratorConst;
+ typedef PacketList::reverse_iterator ReversePacketIterator;
+
+ void InformOfEmptyPacket(uint16_t seq_num);
+
+ // Finds the first packet of the next VP8 partition. If none is found,
+ // the returned iterator points to `packets_.end()`. `it` is expected to
+ // point to the last packet of the previous partition, or to the first
+ // packet of the frame.
+ PacketIterator FindNextPartitionBeginning(PacketIterator it) const;
+
+ // Returns an iterator pointing to the last packet of the partition pointed to
+ // by `it`.
+ PacketIterator FindPartitionEnd(PacketIterator it) const;
+ static bool InSequence(const PacketIterator& it,
+ const PacketIterator& prev_it);
+ size_t InsertBuffer(uint8_t* frame_buffer, PacketIterator packetIterator);
+ size_t Insert(const uint8_t* buffer,
+ size_t length,
+ bool insert_start_code,
+ uint8_t* frame_buffer);
+ void ShiftSubsequentPackets(PacketIterator it, int steps_to_shift);
+ PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
+ // Deletes the data of all packets between `start` and `end`, inclusively.
+ // Note that this function doesn't delete the actual packets.
+ size_t DeletePacketData(PacketIterator start, PacketIterator end);
+ void UpdateCompleteSession();
+
+ bool complete_;
+ webrtc::VideoFrameType frame_type_;
+ // Packets in this frame.
+ PacketList packets_;
+ int empty_seq_num_low_;
+ int empty_seq_num_high_;
+
+ // The following two variables correspond to the first and last media packets
+ // in a session defined by the first packet flag and the marker bit.
+ // They are not necessarily equal to the front and back packets, as packets
+ // may enter out of order.
+ // TODO(mikhal): Refactor the list to use a map.
+ int first_packet_seq_num_;
+ int last_packet_seq_num_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SESSION_INFO_H_
diff --git a/third_party/libwebrtc/modules/video_coding/session_info_unittest.cc b/third_party/libwebrtc/modules/video_coding/session_info_unittest.cc
new file mode 100644
index 0000000000..867997701d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/session_info_unittest.cc
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/session_info.h"
+
+#include <string.h>
+
+#include "modules/video_coding/packet.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class TestSessionInfo : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ memset(packet_buffer_, 0, sizeof(packet_buffer_));
+ memset(frame_buffer_, 0, sizeof(frame_buffer_));
+ session_.Reset();
+ packet_.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ packet_.sizeBytes = packet_buffer_size();
+ packet_.dataPtr = packet_buffer_;
+ packet_.seqNum = 0;
+ packet_.timestamp = 0;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ }
+
+ void FillPacket(uint8_t start_value) {
+ for (size_t i = 0; i < packet_buffer_size(); ++i)
+ packet_buffer_[i] = start_value + i;
+ }
+
+ void VerifyPacket(uint8_t* start_ptr, uint8_t start_value) {
+ for (size_t j = 0; j < packet_buffer_size(); ++j) {
+ ASSERT_EQ(start_value + j, start_ptr[j]);
+ }
+ }
+
+ size_t packet_buffer_size() const {
+ return sizeof(packet_buffer_) / sizeof(packet_buffer_[0]);
+ }
+ size_t frame_buffer_size() const {
+ return sizeof(frame_buffer_) / sizeof(frame_buffer_[0]);
+ }
+
+ enum { kPacketBufferSize = 10 };
+
+ uint8_t packet_buffer_[kPacketBufferSize];
+ uint8_t frame_buffer_[10 * kPacketBufferSize];
+
+ VCMSessionInfo session_;
+ VCMPacket packet_;
+ FrameData frame_data;
+};
+
+class TestNalUnits : public TestSessionInfo {
+ protected:
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ packet_.video_header.codec = kVideoCodecVP8;
+ }
+
+ bool VerifyNalu(int offset, int packets_expected, int start_value) {
+ EXPECT_GE(session_.SessionLength(),
+ packets_expected * packet_buffer_size());
+ for (int i = 0; i < packets_expected; ++i) {
+ int packet_index = (offset + i) * packet_buffer_size();
+ VerifyPacket(frame_buffer_ + packet_index, start_value + i);
+ }
+ return true;
+ }
+};
+
+class TestNackList : public TestSessionInfo {
+ protected:
+ static const size_t kMaxSeqNumListLength = 30;
+
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ seq_num_list_length_ = 0;
+ memset(seq_num_list_, 0, sizeof(seq_num_list_));
+ }
+
+ void BuildSeqNumList(uint16_t low, uint16_t high) {
+ size_t i = 0;
+ while (low != high + 1) {
+ EXPECT_LT(i, kMaxSeqNumListLength);
+ if (i >= kMaxSeqNumListLength) {
+ seq_num_list_length_ = kMaxSeqNumListLength;
+ return;
+ }
+ seq_num_list_[i] = low;
+ low++;
+ i++;
+ }
+ seq_num_list_length_ = i;
+ }
+
+ void VerifyAll(int value) {
+ for (int i = 0; i < seq_num_list_length_; ++i)
+ EXPECT_EQ(seq_num_list_[i], value);
+ }
+
+ int seq_num_list_[kMaxSeqNumListLength];
+ int seq_num_list_length_;
+};
+
+TEST_F(TestSessionInfo, TestSimpleAPIs) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.seqNum = 0xFFFE;
+ packet_.sizeBytes = packet_buffer_size();
+ packet_.video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ EXPECT_FALSE(session_.HaveLastPacket());
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, session_.FrameType());
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ packet_.seqNum += 1;
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ EXPECT_TRUE(session_.HaveLastPacket());
+ EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
+ EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
+
+ // Insert empty packet which will be the new high sequence number.
+ // To make things more difficult we will make sure to have a wrap here.
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ packet_.seqNum = 2;
+ packet_.sizeBytes = 0;
+ packet_.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
+}
+
+TEST_F(TestSessionInfo, NormalOperation) {
+ packet_.seqNum = 0xFFFF;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ for (int i = 1; i < 9; ++i) {
+ packet_.seqNum += 1;
+ FillPacket(i);
+ ASSERT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ }
+
+ packet_.seqNum += 1;
+ packet_.markerBit = true;
+ FillPacket(9);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(10 * packet_buffer_size(), session_.SessionLength());
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE("Calling VerifyPacket");
+ VerifyPacket(frame_buffer_ + i * packet_buffer_size(), i);
+ }
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
+ packet_.seqNum = 0x0001;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0000;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, SetMarkerBitOnce) {
+ packet_.seqNum = 0x0005;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ ++packet_.seqNum;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
+ // Allow packets in the range 5-6.
+ packet_.seqNum = 0x0005;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ // Insert an older packet with a first packet set.
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0006;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0008;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
+ packet_.seqNum = 0xFFFE;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0002;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ ASSERT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0xFFF0;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0006;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
+ // Insert out of bound regular packets, and then the first and last packet.
+ // Verify that correct bounds are maintained.
+ packet_.seqNum = 0x0003;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ // Insert an older packet with a first packet set.
+ packet_.seqNum = 0x0005;
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0004;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+ packet_.seqNum = 0x0010;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+ packet_.seqNum = 0x0008;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum = 0x0009;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+}
+
+TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluComplete;
+ packet_.video_header.frame_type = VideoFrameType::kEmptyFrame;
+ packet_.sizeBytes = 0;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(2 * packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(1, 1, 2));
+}
+
+TEST_F(TestNalUnits, LossInMiddleOfNalu) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.seqNum += 2;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, ReorderWrapNoLoss) {
+ packet_.seqNum = 0xFFFF;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.seqNum += 1;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum -= 1;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(3 * packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, WrapLosses) {
+ packet_.seqNum = 0xFFFF;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+TEST_F(TestNalUnits, ReorderWrapLosses) {
+ packet_.seqNum = 0xFFFF;
+
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ packet_.seqNum -= 2;
+ packet_.video_header.is_first_packet_in_frame = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, frame_data)));
+
+ EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/BUILD.gn b/third_party/libwebrtc/modules/video_coding/svc/BUILD.gn
new file mode 100644
index 0000000000..b8ce91d99a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/BUILD.gn
@@ -0,0 +1,135 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_source_set("scalability_mode_util") {
+ sources = [
+ "scalability_mode_util.cc",
+ "scalability_mode_util.h",
+ ]
+ deps = [
+ "../../../api/video_codecs:scalability_mode",
+ "../../../api/video_codecs:video_codecs_api",
+ "../../../rtc_base:checks",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("scalable_video_controller") {
+ sources = [
+ "scalable_video_controller.h",
+ "scalable_video_controller_no_layering.cc",
+ "scalable_video_controller_no_layering.h",
+ ]
+ deps = [
+ "../../../api/transport/rtp:dependency_descriptor",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../common_video/generic_frame_descriptor",
+ "../../../rtc_base:checks",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("scalability_structures") {
+ sources = [
+ "create_scalability_structure.cc",
+ "create_scalability_structure.h",
+ "scalability_structure_full_svc.cc",
+ "scalability_structure_full_svc.h",
+ "scalability_structure_key_svc.cc",
+ "scalability_structure_key_svc.h",
+ "scalability_structure_l2t2_key_shift.cc",
+ "scalability_structure_l2t2_key_shift.h",
+ "scalability_structure_simulcast.cc",
+ "scalability_structure_simulcast.h",
+ ]
+ deps = [
+ ":scalable_video_controller",
+ "../../../api/transport/rtp:dependency_descriptor",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../api/video_codecs:scalability_mode",
+ "../../../common_video/generic_frame_descriptor",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("svc_rate_allocator") {
+ sources = [
+ "svc_rate_allocator.cc",
+ "svc_rate_allocator.h",
+ ]
+ deps = [
+ ":scalability_structures",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../api/video:video_bitrate_allocator",
+ "../../../api/video:video_codec_constants",
+ "../../../api/video_codecs:video_codecs_api",
+ "../../../rtc_base:checks",
+ "../../../rtc_base/experiments:stable_target_rate_experiment",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
+}
+
+if (rtc_include_tests) {
+ rtc_source_set("scalability_structure_tests") {
+ testonly = true
+ sources = [
+ "scalability_mode_util_unittest.cc",
+ "scalability_structure_full_svc_unittest.cc",
+ "scalability_structure_key_svc_unittest.cc",
+ "scalability_structure_l2t2_key_shift_unittest.cc",
+ "scalability_structure_test_helpers.cc",
+ "scalability_structure_test_helpers.h",
+ "scalability_structure_unittest.cc",
+ ]
+ deps = [
+ ":scalability_mode_util",
+ ":scalability_structures",
+ ":scalable_video_controller",
+ "..:chain_diff_calculator",
+ "..:frame_dependencies_calculator",
+ "../../../api:array_view",
+ "../../../api/transport/rtp:dependency_descriptor",
+ "../../../api/video:video_bitrate_allocation",
+ "../../../api/video:video_frame_type",
+ "../../../api/video_codecs:scalability_mode",
+ "../../../common_video/generic_frame_descriptor",
+ "../../../rtc_base:stringutils",
+ "../../../test:test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ rtc_source_set("svc_rate_allocator_tests") {
+ testonly = true
+ sources = [ "svc_rate_allocator_unittest.cc" ]
+ deps = [
+ ":svc_rate_allocator",
+ "..:webrtc_vp9_helpers",
+ "../../../rtc_base:checks",
+ "../../../test:field_trial",
+ "../../../test:test_support",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc
new file mode 100644
index 0000000000..fbcd27b139
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/create_scalability_structure.h"
+
+#include <memory>
+
+#include "api/video_codecs/scalability_mode.h"
+#include "modules/video_coding/svc/scalability_structure_full_svc.h"
+#include "modules/video_coding/svc/scalability_structure_key_svc.h"
+#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h"
+#include "modules/video_coding/svc/scalability_structure_simulcast.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+struct NamedStructureFactory {
+ ScalabilityMode name;
+  // Use a function pointer to make NamedStructureFactory trivially
+  // destructible.
+ std::unique_ptr<ScalableVideoController> (*factory)();
+ ScalableVideoController::StreamLayersConfig config;
+};
+
+// Wrap std::make_unique so the factory function pointer has the correct
+// return type.
+template <typename T>
+std::unique_ptr<ScalableVideoController> Create() {
+ return std::make_unique<T>();
+}
+
+template <typename T>
+std::unique_ptr<ScalableVideoController> CreateH() {
+ // 1.5:1 scaling, see https://w3c.github.io/webrtc-svc/#scalabilitymodes*
+ typename T::ScalingFactor factor;
+ factor.num = 2;
+ factor.den = 3;
+ return std::make_unique<T>(factor);
+}
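+
+// Illustrative numbers only: with factor 2/3, a 960x540 top spatial layer
+// gets a 640x360 layer below it, i.e. the 1.5:1 ratio of the "h" modes.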
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL1T1 = {
+ /*num_spatial_layers=*/1, /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL1T2 = {
+ /*num_spatial_layers=*/1, /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/false};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL1T3 = {
+ /*num_spatial_layers=*/1, /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T1 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T1h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T2 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/true,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T2h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/true,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T3 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/true,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL2T3h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/true,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T1 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T1h = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/true,
+ {4, 2, 1},
+ {9, 3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T2 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/true,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T2h = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/true,
+ {4, 2, 1},
+ {9, 3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T3 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/true,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigL3T3h = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/true,
+ {4, 2, 1},
+ {9, 3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T1 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T1h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T2 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/false,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T2h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/false,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T3 = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false,
+ {1, 1},
+ {2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS2T3h = {
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false,
+ {2, 1},
+ {3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T1 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T1h = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/1,
+ /*uses_reference_scaling=*/false,
+ {4, 2, 1},
+ {9, 3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T2 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/false,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T2h = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/2,
+ /*uses_reference_scaling=*/false,
+ {4, 2, 1},
+ {9, 3, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T3 = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false,
+ {1, 1, 1},
+ {4, 2, 1}};
+
+constexpr ScalableVideoController::StreamLayersConfig kConfigS3T3h = {
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*uses_reference_scaling=*/false,
+ {4, 2, 1},
+ {9, 3, 1}};
+
+constexpr NamedStructureFactory kFactories[] = {
+ {ScalabilityMode::kL1T1, Create<ScalableVideoControllerNoLayering>,
+ kConfigL1T1},
+ {ScalabilityMode::kL1T2, Create<ScalabilityStructureL1T2>, kConfigL1T2},
+ {ScalabilityMode::kL1T3, Create<ScalabilityStructureL1T3>, kConfigL1T3},
+ {ScalabilityMode::kL2T1, Create<ScalabilityStructureL2T1>, kConfigL2T1},
+ {ScalabilityMode::kL2T1h, CreateH<ScalabilityStructureL2T1>, kConfigL2T1h},
+ {ScalabilityMode::kL2T1_KEY, Create<ScalabilityStructureL2T1Key>,
+ kConfigL2T1},
+ {ScalabilityMode::kL2T2, Create<ScalabilityStructureL2T2>, kConfigL2T2},
+ {ScalabilityMode::kL2T2h, CreateH<ScalabilityStructureL2T2>, kConfigL2T2h},
+ {ScalabilityMode::kL2T2_KEY, Create<ScalabilityStructureL2T2Key>,
+ kConfigL2T2},
+ {ScalabilityMode::kL2T2_KEY_SHIFT, Create<ScalabilityStructureL2T2KeyShift>,
+ kConfigL2T2},
+ {ScalabilityMode::kL2T3, Create<ScalabilityStructureL2T3>, kConfigL2T3},
+ {ScalabilityMode::kL2T3h, CreateH<ScalabilityStructureL2T3>, kConfigL2T3h},
+ {ScalabilityMode::kL2T3_KEY, Create<ScalabilityStructureL2T3Key>,
+ kConfigL2T3},
+ {ScalabilityMode::kL3T1, Create<ScalabilityStructureL3T1>, kConfigL3T1},
+ {ScalabilityMode::kL3T1h, CreateH<ScalabilityStructureL3T1>, kConfigL3T1h},
+ {ScalabilityMode::kL3T1_KEY, Create<ScalabilityStructureL3T1Key>,
+ kConfigL3T1},
+ {ScalabilityMode::kL3T2, Create<ScalabilityStructureL3T2>, kConfigL3T2},
+ {ScalabilityMode::kL3T2h, CreateH<ScalabilityStructureL3T2>, kConfigL3T2h},
+ {ScalabilityMode::kL3T2_KEY, Create<ScalabilityStructureL3T2Key>,
+ kConfigL3T2},
+ {ScalabilityMode::kL3T3, Create<ScalabilityStructureL3T3>, kConfigL3T3},
+ {ScalabilityMode::kL3T3h, CreateH<ScalabilityStructureL3T3>, kConfigL3T3h},
+ {ScalabilityMode::kL3T3_KEY, Create<ScalabilityStructureL3T3Key>,
+ kConfigL3T3},
+ {ScalabilityMode::kS2T1, Create<ScalabilityStructureS2T1>, kConfigS2T1},
+ {ScalabilityMode::kS2T1h, CreateH<ScalabilityStructureS2T1>, kConfigS2T1h},
+ {ScalabilityMode::kS2T2, Create<ScalabilityStructureS2T2>, kConfigS2T2},
+ {ScalabilityMode::kS2T2h, CreateH<ScalabilityStructureS2T2>, kConfigS2T2h},
+ {ScalabilityMode::kS2T3, Create<ScalabilityStructureS2T3>, kConfigS2T3},
+ {ScalabilityMode::kS2T3h, CreateH<ScalabilityStructureS2T3>, kConfigS2T3h},
+ {ScalabilityMode::kS3T1, Create<ScalabilityStructureS3T1>, kConfigS3T1},
+ {ScalabilityMode::kS3T1h, CreateH<ScalabilityStructureS3T1>, kConfigS3T1h},
+ {ScalabilityMode::kS3T2, Create<ScalabilityStructureS3T2>, kConfigS3T2},
+ {ScalabilityMode::kS3T2h, CreateH<ScalabilityStructureS3T2>, kConfigS3T2h},
+ {ScalabilityMode::kS3T3, Create<ScalabilityStructureS3T3>, kConfigS3T3},
+ {ScalabilityMode::kS3T3h, CreateH<ScalabilityStructureS3T3>, kConfigS3T3h},
+};
+
+} // namespace
+
+std::unique_ptr<ScalableVideoController> CreateScalabilityStructure(
+ ScalabilityMode name) {
+ for (const auto& entry : kFactories) {
+ if (entry.name == name) {
+ return entry.factory();
+ }
+ }
+ return nullptr;
+}
+
+absl::optional<ScalableVideoController::StreamLayersConfig>
+ScalabilityStructureConfig(ScalabilityMode name) {
+ for (const auto& entry : kFactories) {
+ if (entry.name == name) {
+ return entry.config;
+ }
+ }
+ return absl::nullopt;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h
new file mode 100644
index 0000000000..3b67443693
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_
+#define MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Creates a structure by name according to
+// https://w3c.github.io/webrtc-svc/#scalabilitymodes*
+// Returns nullptr for an unknown name.
+std::unique_ptr<ScalableVideoController> CreateScalabilityStructure(
+ ScalabilityMode name);
+
+// Returns a description of the scalability structure identified by `name`.
+// Returns nullopt for an unknown name.
+absl::optional<ScalableVideoController::StreamLayersConfig>
+ScalabilityStructureConfig(ScalabilityMode name);
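+
+// A minimal usage sketch (hypothetical caller code):
+//   std::unique_ptr<ScalableVideoController> controller =
+//       CreateScalabilityStructure(ScalabilityMode::kL2T2);
+//   if (controller != nullptr) {
+//     // Query layer counts and scaling factors for encoder configuration.
+//     absl::optional<ScalableVideoController::StreamLayersConfig> config =
+//         ScalabilityStructureConfig(ScalabilityMode::kL2T2);
+//   }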
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc
new file mode 100644
index 0000000000..35d66df203
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/scalability_mode_util.h"
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+absl::optional<ScalabilityMode> ScalabilityModeFromString(
+ absl::string_view mode_string) {
+ if (mode_string == "L1T1")
+ return ScalabilityMode::kL1T1;
+ if (mode_string == "L1T2")
+ return ScalabilityMode::kL1T2;
+ if (mode_string == "L1T3")
+ return ScalabilityMode::kL1T3;
+
+ if (mode_string == "L2T1")
+ return ScalabilityMode::kL2T1;
+ if (mode_string == "L2T1h")
+ return ScalabilityMode::kL2T1h;
+ if (mode_string == "L2T1_KEY")
+ return ScalabilityMode::kL2T1_KEY;
+
+ if (mode_string == "L2T2")
+ return ScalabilityMode::kL2T2;
+ if (mode_string == "L2T2h")
+ return ScalabilityMode::kL2T2h;
+ if (mode_string == "L2T2_KEY")
+ return ScalabilityMode::kL2T2_KEY;
+ if (mode_string == "L2T2_KEY_SHIFT")
+ return ScalabilityMode::kL2T2_KEY_SHIFT;
+ if (mode_string == "L2T3")
+ return ScalabilityMode::kL2T3;
+ if (mode_string == "L2T3h")
+ return ScalabilityMode::kL2T3h;
+ if (mode_string == "L2T3_KEY")
+ return ScalabilityMode::kL2T3_KEY;
+
+ if (mode_string == "L3T1")
+ return ScalabilityMode::kL3T1;
+ if (mode_string == "L3T1h")
+ return ScalabilityMode::kL3T1h;
+ if (mode_string == "L3T1_KEY")
+ return ScalabilityMode::kL3T1_KEY;
+
+ if (mode_string == "L3T2")
+ return ScalabilityMode::kL3T2;
+ if (mode_string == "L3T2h")
+ return ScalabilityMode::kL3T2h;
+ if (mode_string == "L3T2_KEY")
+ return ScalabilityMode::kL3T2_KEY;
+
+ if (mode_string == "L3T3")
+ return ScalabilityMode::kL3T3;
+ if (mode_string == "L3T3h")
+ return ScalabilityMode::kL3T3h;
+ if (mode_string == "L3T3_KEY")
+ return ScalabilityMode::kL3T3_KEY;
+
+ if (mode_string == "S2T1")
+ return ScalabilityMode::kS2T1;
+ if (mode_string == "S2T1h")
+ return ScalabilityMode::kS2T1h;
+ if (mode_string == "S2T2")
+ return ScalabilityMode::kS2T2;
+ if (mode_string == "S2T2h")
+ return ScalabilityMode::kS2T2h;
+ if (mode_string == "S2T3")
+ return ScalabilityMode::kS2T3;
+ if (mode_string == "S2T3h")
+ return ScalabilityMode::kS2T3h;
+ if (mode_string == "S3T1")
+ return ScalabilityMode::kS3T1;
+ if (mode_string == "S3T1h")
+ return ScalabilityMode::kS3T1h;
+ if (mode_string == "S3T2")
+ return ScalabilityMode::kS3T2;
+ if (mode_string == "S3T2h")
+ return ScalabilityMode::kS3T2h;
+ if (mode_string == "S3T3")
+ return ScalabilityMode::kS3T3;
+ if (mode_string == "S3T3h")
+ return ScalabilityMode::kS3T3h;
+
+ return absl::nullopt;
+}
+
+InterLayerPredMode ScalabilityModeToInterLayerPredMode(
+ ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ case ScalabilityMode::kL1T2:
+ case ScalabilityMode::kL1T3:
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL2T1_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL2T3_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL3T1_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL3T2_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3h:
+ return InterLayerPredMode::kOn;
+ case ScalabilityMode::kL3T3_KEY:
+ return InterLayerPredMode::kOnKeyPic;
+ case ScalabilityMode::kS2T1:
+ case ScalabilityMode::kS2T1h:
+ case ScalabilityMode::kS2T2:
+ case ScalabilityMode::kS2T2h:
+ case ScalabilityMode::kS2T3:
+ case ScalabilityMode::kS2T3h:
+ case ScalabilityMode::kS3T1:
+ case ScalabilityMode::kS3T1h:
+ case ScalabilityMode::kS3T2:
+ case ScalabilityMode::kS3T2h:
+ case ScalabilityMode::kS3T3:
+ case ScalabilityMode::kS3T3h:
+ return InterLayerPredMode::kOff;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+int ScalabilityModeToNumSpatialLayers(ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ case ScalabilityMode::kL1T2:
+ case ScalabilityMode::kL1T3:
+ return 1;
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1h:
+ case ScalabilityMode::kL2T1_KEY:
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2h:
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3h:
+ case ScalabilityMode::kL2T3_KEY:
+ return 2;
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1h:
+ case ScalabilityMode::kL3T1_KEY:
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2h:
+ case ScalabilityMode::kL3T2_KEY:
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3h:
+ case ScalabilityMode::kL3T3_KEY:
+ return 3;
+ case ScalabilityMode::kS2T1:
+ case ScalabilityMode::kS2T1h:
+ case ScalabilityMode::kS2T2:
+ case ScalabilityMode::kS2T2h:
+ case ScalabilityMode::kS2T3:
+ case ScalabilityMode::kS2T3h:
+ return 2;
+ case ScalabilityMode::kS3T1:
+ case ScalabilityMode::kS3T1h:
+ case ScalabilityMode::kS3T2:
+ case ScalabilityMode::kS3T2h:
+ case ScalabilityMode::kS3T3:
+ case ScalabilityMode::kS3T3h:
+ return 3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+int ScalabilityModeToNumTemporalLayers(ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ return 1;
+ case ScalabilityMode::kL1T2:
+ return 2;
+ case ScalabilityMode::kL1T3:
+ return 3;
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1h:
+ case ScalabilityMode::kL2T1_KEY:
+ return 1;
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2h:
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ return 2;
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3h:
+ case ScalabilityMode::kL2T3_KEY:
+ return 3;
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1h:
+ case ScalabilityMode::kL3T1_KEY:
+ return 1;
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2h:
+ case ScalabilityMode::kL3T2_KEY:
+ return 2;
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3h:
+ case ScalabilityMode::kL3T3_KEY:
+ return 3;
+ case ScalabilityMode::kS2T1:
+ case ScalabilityMode::kS2T1h:
+ case ScalabilityMode::kS3T1:
+ case ScalabilityMode::kS3T1h:
+ return 1;
+ case ScalabilityMode::kS2T2:
+ case ScalabilityMode::kS2T2h:
+ case ScalabilityMode::kS3T2:
+ case ScalabilityMode::kS3T2h:
+ return 2;
+ case ScalabilityMode::kS2T3:
+ case ScalabilityMode::kS2T3h:
+ case ScalabilityMode::kS3T3:
+ case ScalabilityMode::kS3T3h:
+ return 3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+absl::optional<ScalabilityModeResolutionRatio> ScalabilityModeToResolutionRatio(
+ ScalabilityMode scalability_mode) {
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ case ScalabilityMode::kL1T2:
+ case ScalabilityMode::kL1T3:
+ return absl::nullopt;
+ case ScalabilityMode::kL2T1:
+ case ScalabilityMode::kL2T1_KEY:
+ case ScalabilityMode::kL2T2:
+ case ScalabilityMode::kL2T2_KEY:
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ case ScalabilityMode::kL2T3:
+ case ScalabilityMode::kL2T3_KEY:
+ case ScalabilityMode::kL3T1:
+ case ScalabilityMode::kL3T1_KEY:
+ case ScalabilityMode::kL3T2:
+ case ScalabilityMode::kL3T2_KEY:
+ case ScalabilityMode::kL3T3:
+ case ScalabilityMode::kL3T3_KEY:
+ case ScalabilityMode::kS2T1:
+ case ScalabilityMode::kS2T2:
+ case ScalabilityMode::kS2T3:
+ case ScalabilityMode::kS3T1:
+ case ScalabilityMode::kS3T2:
+ case ScalabilityMode::kS3T3:
+ return ScalabilityModeResolutionRatio::kTwoToOne;
+ case ScalabilityMode::kL2T1h:
+ case ScalabilityMode::kL2T2h:
+ case ScalabilityMode::kL2T3h:
+ case ScalabilityMode::kL3T1h:
+ case ScalabilityMode::kL3T2h:
+ case ScalabilityMode::kL3T3h:
+ case ScalabilityMode::kS2T1h:
+ case ScalabilityMode::kS2T2h:
+ case ScalabilityMode::kS2T3h:
+ case ScalabilityMode::kS3T1h:
+ case ScalabilityMode::kS3T2h:
+ case ScalabilityMode::kS3T3h:
+ return ScalabilityModeResolutionRatio::kThreeToTwo;
+ }
+ RTC_CHECK_NOTREACHED();
+}
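+
+// Note that every "h" mode maps to kThreeToTwo (1.5:1) and every other
+// multi-layer mode to kTwoToOne, mirroring the scaling factors used by
+// CreateH() in create_scalability_structure.cc.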
+
+ScalabilityMode LimitNumSpatialLayers(ScalabilityMode scalability_mode,
+ int max_spatial_layers) {
+ int num_spatial_layers = ScalabilityModeToNumSpatialLayers(scalability_mode);
+ if (max_spatial_layers >= num_spatial_layers) {
+ return scalability_mode;
+ }
+
+ switch (scalability_mode) {
+ case ScalabilityMode::kL1T1:
+ return ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL1T2:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL1T3:
+ return ScalabilityMode::kL1T3;
+ case ScalabilityMode::kL2T1:
+ return ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL2T1h:
+ return ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL2T1_KEY:
+ return ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL2T2:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL2T2h:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL2T2_KEY:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL2T2_KEY_SHIFT:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL2T3:
+ return ScalabilityMode::kL1T3;
+ case ScalabilityMode::kL2T3h:
+ return ScalabilityMode::kL1T3;
+ case ScalabilityMode::kL2T3_KEY:
+ return ScalabilityMode::kL1T3;
+ case ScalabilityMode::kL3T1:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T1
+ : ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL3T1h:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T1h
+ : ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL3T1_KEY:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T1_KEY
+ : ScalabilityMode::kL1T1;
+ case ScalabilityMode::kL3T2:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T2
+ : ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL3T2h:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T2h
+ : ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL3T2_KEY:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T2_KEY
+ : ScalabilityMode::kL1T2;
+ case ScalabilityMode::kL3T3:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T3
+ : ScalabilityMode::kL1T3;
+ case ScalabilityMode::kL3T3h:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T3h
+ : ScalabilityMode::kL1T3;
+ case ScalabilityMode::kL3T3_KEY:
+ return max_spatial_layers == 2 ? ScalabilityMode::kL2T3_KEY
+ : ScalabilityMode::kL1T3;
+ case ScalabilityMode::kS2T1:
+ return ScalabilityMode::kL1T1;
+ case ScalabilityMode::kS2T1h:
+ return ScalabilityMode::kL1T1;
+ case ScalabilityMode::kS2T2:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kS2T2h:
+ return ScalabilityMode::kL1T2;
+ case ScalabilityMode::kS2T3:
+ return ScalabilityMode::kL1T3;
+ case ScalabilityMode::kS2T3h:
+ return ScalabilityMode::kL1T3;
+ case ScalabilityMode::kS3T1:
+ return max_spatial_layers == 2 ? ScalabilityMode::kS2T1
+ : ScalabilityMode::kL1T1;
+ case ScalabilityMode::kS3T1h:
+ return max_spatial_layers == 2 ? ScalabilityMode::kS2T1h
+ : ScalabilityMode::kL1T1;
+ case ScalabilityMode::kS3T2:
+ return max_spatial_layers == 2 ? ScalabilityMode::kS2T2
+ : ScalabilityMode::kL1T2;
+ case ScalabilityMode::kS3T2h:
+ return max_spatial_layers == 2 ? ScalabilityMode::kS2T2h
+ : ScalabilityMode::kL1T2;
+ case ScalabilityMode::kS3T3:
+ return max_spatial_layers == 2 ? ScalabilityMode::kS2T3
+ : ScalabilityMode::kL1T3;
+ case ScalabilityMode::kS3T3h:
+ return max_spatial_layers == 2 ? ScalabilityMode::kS2T3h
+ : ScalabilityMode::kL1T3;
+ }
+ RTC_CHECK_NOTREACHED();
+}
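+
+// For example, LimitNumSpatialLayers(ScalabilityMode::kL3T3_KEY, 2) yields
+// kL2T3_KEY, and LimitNumSpatialLayers(ScalabilityMode::kS3T2, 1) yields
+// kL1T2; simulcast modes collapse to single-layer "L" modes since a single
+// spatial layer leaves nothing to simulcast. The NumSpatialLayersTest
+// parameterization in scalability_mode_util_unittest.cc exercises all cases.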
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h
new file mode 100644
index 0000000000..9c8193e037
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_MODE_UTIL_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_MODE_UTIL_H_
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+enum class ScalabilityModeResolutionRatio {
+ kTwoToOne, // The resolution ratio between spatial layers is 2:1.
+ kThreeToTwo, // The resolution ratio between spatial layers is 1.5:1.
+};
+
+static constexpr char kDefaultScalabilityModeStr[] = "L1T2";
+
+absl::optional<ScalabilityMode> ScalabilityModeFromString(
+ absl::string_view scalability_mode_string);
+
+InterLayerPredMode ScalabilityModeToInterLayerPredMode(
+ ScalabilityMode scalability_mode);
+
+int ScalabilityModeToNumSpatialLayers(ScalabilityMode scalability_mode);
+
+int ScalabilityModeToNumTemporalLayers(ScalabilityMode scalability_mode);
+
+absl::optional<ScalabilityModeResolutionRatio> ScalabilityModeToResolutionRatio(
+ ScalabilityMode scalability_mode);
+
+ScalabilityMode LimitNumSpatialLayers(ScalabilityMode scalability_mode,
+ int max_spatial_layers);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_MODE_UTIL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build
new file mode 100644
index 0000000000..f786b11616
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("scalability_mode_util_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc
new file mode 100644
index 0000000000..448494ffcc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_mode_util_unittest.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/scalability_mode_util.h"
+
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+TEST(ScalabilityModeUtil, ConvertsL1T2) {
+ EXPECT_EQ(ScalabilityModeFromString("L1T2"), ScalabilityMode::kL1T2);
+ EXPECT_EQ(ScalabilityModeToString(ScalabilityMode::kL1T2), "L1T2");
+}
+
+TEST(ScalabilityModeUtil, RejectsUnknownString) {
+ EXPECT_EQ(ScalabilityModeFromString(""), absl::nullopt);
+ EXPECT_EQ(ScalabilityModeFromString("not-a-mode"), absl::nullopt);
+}
+
+// Check roundtrip conversion of all enum values.
+TEST(ScalabilityModeUtil, ConvertsAllToAndFromString) {
+ const ScalabilityMode kLastEnum = ScalabilityMode::kS3T3h;
+ for (int numerical_enum = 0; numerical_enum <= static_cast<int>(kLastEnum);
+ numerical_enum++) {
+ ScalabilityMode scalability_mode =
+ static_cast<ScalabilityMode>(numerical_enum);
+ absl::string_view scalability_mode_string =
+ ScalabilityModeToString(scalability_mode);
+ EXPECT_FALSE(scalability_mode_string.empty());
+ EXPECT_EQ(ScalabilityModeFromString(scalability_mode_string),
+ scalability_mode);
+ }
+}
+
+struct TestParams {
+ std::string scalability_mode;
+ std::vector<std::tuple<std::vector<int>, std::string>>
+ limited_scalability_mode;
+};
+
+class NumSpatialLayersTest : public ::testing::TestWithParam<TestParams> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ MaxLayers,
+ NumSpatialLayersTest,
+ ::testing::ValuesIn<TestParams>(
+ {{"L1T1", {{{0, 1}, "L1T1"}, {{2}, "L1T1"}, {{3}, "L1T1"}}},
+ {"L1T2", {{{0, 1}, "L1T2"}, {{2}, "L1T2"}, {{3}, "L1T2"}}},
+ {"L1T3", {{{0, 1}, "L1T3"}, {{2}, "L1T3"}, {{3}, "L1T3"}}},
+ {"L2T1", {{{0, 1}, "L1T1"}, {{2}, "L2T1"}, {{3}, "L2T1"}}},
+ {"L2T1h", {{{0, 1}, "L1T1"}, {{2}, "L2T1h"}, {{3}, "L2T1h"}}},
+ {"L2T1_KEY", {{{0, 1}, "L1T1"}, {{2}, "L2T1_KEY"}, {{3}, "L2T1_KEY"}}},
+ {"L2T2", {{{0, 1}, "L1T2"}, {{2}, "L2T2"}, {{3}, "L2T2"}}},
+ {"L2T2h", {{{0, 1}, "L1T2"}, {{2}, "L2T2h"}, {{3}, "L2T2h"}}},
+ {"L2T2_KEY", {{{0, 1}, "L1T2"}, {{2}, "L2T2_KEY"}, {{3}, "L2T2_KEY"}}},
+ {"L2T2_KEY_SHIFT",
+ {{{0, 1}, "L1T2"}, {{2}, "L2T2_KEY_SHIFT"}, {{3}, "L2T2_KEY_SHIFT"}}},
+ {"L2T3", {{{0, 1}, "L1T3"}, {{2}, "L2T3"}, {{3}, "L2T3"}}},
+ {"L2T3h", {{{0, 1}, "L1T3"}, {{2}, "L2T3h"}, {{3}, "L2T3h"}}},
+ {"L2T3_KEY", {{{0, 1}, "L1T3"}, {{2}, "L2T3_KEY"}, {{3}, "L2T3_KEY"}}},
+ {"L3T1", {{{0, 1}, "L1T1"}, {{2}, "L2T1"}, {{3}, "L3T1"}}},
+ {"L3T1h", {{{0, 1}, "L1T1"}, {{2}, "L2T1h"}, {{3}, "L3T1h"}}},
+ {"L3T1_KEY", {{{0, 1}, "L1T1"}, {{2}, "L2T1_KEY"}, {{3}, "L3T1_KEY"}}},
+ {"L3T2", {{{0, 1}, "L1T2"}, {{2}, "L2T2"}, {{3}, "L3T2"}}},
+ {"L3T2h", {{{0, 1}, "L1T2"}, {{2}, "L2T2h"}, {{3}, "L3T2h"}}},
+ {"L3T2_KEY", {{{0, 1}, "L1T2"}, {{2}, "L2T2_KEY"}, {{3}, "L3T2_KEY"}}},
+ {"L3T3", {{{0, 1}, "L1T3"}, {{2}, "L2T3"}, {{3}, "L3T3"}}},
+ {"L3T3h", {{{0, 1}, "L1T3"}, {{2}, "L2T3h"}, {{3}, "L3T3h"}}},
+ {"L3T3_KEY", {{{0, 1}, "L1T3"}, {{2}, "L2T3_KEY"}, {{3}, "L3T3_KEY"}}},
+ {"S2T1", {{{0, 1}, "L1T1"}, {{2}, "S2T1"}, {{3}, "S2T1"}}},
+ {"S2T1h", {{{0, 1}, "L1T1"}, {{2}, "S2T1h"}, {{3}, "S2T1h"}}},
+ {"S2T2", {{{0, 1}, "L1T2"}, {{2}, "S2T2"}, {{3}, "S2T2"}}},
+ {"S2T2h", {{{0, 1}, "L1T2"}, {{2}, "S2T2h"}, {{3}, "S2T2h"}}},
+ {"S2T3", {{{0, 1}, "L1T3"}, {{2}, "S2T3"}, {{3}, "S2T3"}}},
+ {"S2T3h", {{{0, 1}, "L1T3"}, {{2}, "S2T3h"}, {{3}, "S2T3h"}}},
+ {"S3T1", {{{0, 1}, "L1T1"}, {{2}, "S2T1"}, {{3}, "S3T1"}}},
+ {"S3T1h", {{{0, 1}, "L1T1"}, {{2}, "S2T1h"}, {{3}, "S3T1h"}}},
+ {"S3T2", {{{0, 1}, "L1T2"}, {{2}, "S2T2"}, {{3}, "S3T2"}}},
+ {"S3T2h", {{{0, 1}, "L1T2"}, {{2}, "S2T2h"}, {{3}, "S3T2h"}}},
+ {"S3T3", {{{0, 1}, "L1T3"}, {{2}, "S2T3"}, {{3}, "S3T3"}}},
+ {"S3T3h", {{{0, 1}, "L1T3"}, {{2}, "S2T3h"}, {{3}, "S3T3h"}}}}),
+ [](const ::testing::TestParamInfo<TestParams>& info) {
+ return info.param.scalability_mode;
+ });
+
+TEST_P(NumSpatialLayersTest, LimitsSpatialLayers) {
+ const ScalabilityMode mode =
+ *ScalabilityModeFromString(GetParam().scalability_mode);
+ for (const auto& param : GetParam().limited_scalability_mode) {
+ const std::vector<int> max_num_spatial_layers =
+ std::get<std::vector<int>>(param);
+ const ScalabilityMode expected_mode =
+ *ScalabilityModeFromString(std::get<std::string>(param));
+ for (const auto& max_layers : max_num_spatial_layers) {
+ EXPECT_EQ(expected_mode, LimitNumSpatialLayers(mode, max_layers));
+ }
+ }
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc
new file mode 100644
index 0000000000..a262317597
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_full_svc.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr int ScalabilityStructureFullSvc::kMaxNumSpatialLayers;
+constexpr int ScalabilityStructureFullSvc::kMaxNumTemporalLayers;
+constexpr absl::string_view ScalabilityStructureFullSvc::kFramePatternNames[];
+
+ScalabilityStructureFullSvc::ScalabilityStructureFullSvc(
+ int num_spatial_layers,
+ int num_temporal_layers,
+ ScalingFactor resolution_factor)
+ : num_spatial_layers_(num_spatial_layers),
+ num_temporal_layers_(num_temporal_layers),
+ resolution_factor_(resolution_factor),
+ active_decode_targets_(
+ (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) {
+ RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers);
+ RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
+}
+
+ScalabilityStructureFullSvc::~ScalabilityStructureFullSvc() = default;
+
+ScalabilityStructureFullSvc::StreamLayersConfig
+ScalabilityStructureFullSvc::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = num_spatial_layers_;
+ result.num_temporal_layers = num_temporal_layers_;
+ result.scaling_factor_num[num_spatial_layers_ - 1] = 1;
+ result.scaling_factor_den[num_spatial_layers_ - 1] = 1;
+ for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) {
+ result.scaling_factor_num[sid - 1] =
+ resolution_factor_.num * result.scaling_factor_num[sid];
+ result.scaling_factor_den[sid - 1] =
+ resolution_factor_.den * result.scaling_factor_den[sid];
+ }
+ result.uses_reference_scaling = num_spatial_layers_ > 1;
+ return result;
+}
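+
+// Worked example for the cumulative scaling above: with three spatial layers
+// and a 1/2 resolution factor, the resulting per-layer factors are 1/4, 1/2
+// and 1/1 from the lowest to the highest spatial layer.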
+
+bool ScalabilityStructureFullSvc::TemporalLayerIsActive(int tid) const {
+ if (tid >= num_temporal_layers_) {
+ return false;
+ }
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (DecodeTargetIsActive(sid, tid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+DecodeTargetIndication ScalabilityStructureFullSvc::Dti(
+ int sid,
+ int tid,
+ const LayerFrameConfig& config) {
+ if (sid < config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (sid == config.SpatialId()) {
+ if (tid == 0) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return DecodeTargetIndication::kSwitch;
+ }
+ if (tid == config.TemporalId()) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ if (tid > config.TemporalId()) {
+ RTC_DCHECK_GT(tid, config.TemporalId());
+ return DecodeTargetIndication::kSwitch;
+ }
+ }
+ RTC_DCHECK_GT(sid, config.SpatialId());
+ RTC_DCHECK_GE(tid, config.TemporalId());
+ if (config.IsKeyframe() || config.Id() == kKey) {
+ return DecodeTargetIndication::kSwitch;
+ }
+ return DecodeTargetIndication::kRequired;
+}
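+
+// In words: decode targets below the frame's own layers do not contain it
+// (kNotPresent); on its own spatial layer a T0 frame is a switch point for
+// every temporal target, while a higher temporal frame is discardable for
+// its own temporal target and a switch point for targets above it; higher
+// spatial targets can switch at (key)frames and otherwise merely require it.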
+
+ScalabilityStructureFullSvc::FramePattern
+ScalabilityStructureFullSvc::NextPattern() const {
+ switch (last_pattern_) {
+ case kNone:
+ return kKey;
+ case kDeltaT2B:
+ return kDeltaT0;
+ case kDeltaT2A:
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ case kDeltaT1:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2B;
+ }
+ return kDeltaT0;
+ case kKey:
+ case kDeltaT0:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2A;
+ }
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kNone;
+}
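+
+// With all temporal layers active, this state machine cycles through
+// kDeltaT0, kDeltaT2A, kDeltaT1, kDeltaT2B, kDeltaT0, ...; inactive temporal
+// layers are skipped, degenerating to T0, T1, T0, ... or plain T0, T0, ...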
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureFullSvc::NextFrameConfig(bool restart) {
+ std::vector<LayerFrameConfig> configs;
+ if (active_decode_targets_.none()) {
+ last_pattern_ = kNone;
+ return configs;
+ }
+ configs.reserve(num_spatial_layers_);
+
+ if (last_pattern_ == kNone || restart) {
+ can_reference_t0_frame_for_spatial_id_.reset();
+ last_pattern_ = kNone;
+ }
+ FramePattern current_pattern = NextPattern();
+
+ absl::optional<int> spatial_dependency_buffer_id;
+ switch (current_pattern) {
+ case kDeltaT0:
+ case kKey:
+      // Disallow temporal references that cross a T0 frame on higher
+      // temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+          // The next frame from spatial layer `sid` shouldn't depend on a
+          // potentially old previous frame from the same spatial layer.
+ can_reference_t0_frame_for_spatial_id_.reset(sid);
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(0);
+
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ } else if (current_pattern == kKey) {
+ config.Keyframe();
+ }
+
+ if (can_reference_t0_frame_for_spatial_id_[sid]) {
+ config.ReferenceAndUpdate(BufferIndex(sid, /*tid=*/0));
+ } else {
+ // TODO(bugs.webrtc.org/11999): Propagate chain restart on delta frame
+ // to ChainDiffCalculator
+ config.Update(BufferIndex(sid, /*tid=*/0));
+ }
+
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/0);
+ }
+ break;
+ case kDeltaT1:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/1) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(1);
+ // Temporal reference.
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ // Spatial reference unless this is the lowest active spatial layer.
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ }
+        // No frame references the top temporal layer frame, so there is no
+        // need to save it into a buffer.
+ if (num_temporal_layers_ > 2 || sid < num_spatial_layers_ - 1) {
+ config.Update(BufferIndex(sid, /*tid=*/1));
+ }
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/1);
+ }
+ break;
+ case kDeltaT2A:
+ case kDeltaT2B:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/2) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(2);
+ // Temporal reference.
+ if (current_pattern == kDeltaT2B &&
+ can_reference_t1_frame_for_spatial_id_[sid]) {
+ config.Reference(BufferIndex(sid, /*tid=*/1));
+ } else {
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ }
+ // Spatial reference unless this is the lowest active spatial layer.
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ }
+        // No frame references the top temporal layer frame, so there is no
+        // need to save it into a buffer.
+ if (sid < num_spatial_layers_ - 1) {
+ config.Update(BufferIndex(sid, /*tid=*/2));
+ }
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/2);
+ }
+ break;
+ case kNone:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ if (configs.empty() && !restart) {
+ RTC_LOG(LS_WARNING) << "Failed to generate configuration for L"
+ << num_spatial_layers_ << "T" << num_temporal_layers_
+ << " with active decode targets "
+ << active_decode_targets_.to_string('-').substr(
+ active_decode_targets_.size() -
+ num_spatial_layers_ * num_temporal_layers_)
+ << " and transition from "
+ << kFramePatternNames[last_pattern_] << " to "
+ << kFramePatternNames[current_pattern]
+ << ". Resetting.";
+ return NextFrameConfig(/*restart=*/true);
+ }
+
+ return configs;
+}
+
+GenericFrameInfo ScalabilityStructureFullSvc::OnEncodeDone(
+ const LayerFrameConfig& config) {
+  // When the encoder drops all frames for a temporal unit, it is better to
+  // reuse the old temporal pattern rather than switch to the next one, so the
+  // switch to the next pattern is deferred here from `NextFrameConfig`.
+  // In particular, creating VP9 references relies on this behavior.
+ last_pattern_ = static_cast<FramePattern>(config.Id());
+ if (config.TemporalId() == 0) {
+ can_reference_t0_frame_for_spatial_id_.set(config.SpatialId());
+ }
+ if (config.TemporalId() == 1) {
+ can_reference_t1_frame_for_spatial_id_.set(config.SpatialId());
+ }
+
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ frame_info.decode_target_indications.reserve(num_spatial_layers_ *
+ num_temporal_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
+ if (config.TemporalId() == 0) {
+ frame_info.part_of_chain.resize(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ frame_info.part_of_chain[sid] = config.SpatialId() <= sid;
+ }
+ } else {
+ frame_info.part_of_chain.assign(num_spatial_layers_, false);
+ }
+ frame_info.active_decode_targets = active_decode_targets_;
+ return frame_info;
+}
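+
+// Note on part_of_chain above: a T0 frame on spatial layer `sid` belongs to
+// the chains protecting layer `sid` and every higher spatial layer; frames
+// on higher temporal layers never belong to any chain.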
+
+void ScalabilityStructureFullSvc::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+    // Enable/disable spatial layers independently.
+ bool active = true;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+      // A temporal layer is enabled only if all lower temporal layers also
+      // have a nonzero bitrate.
+ active = active && bitrates.GetBitrate(sid, tid) > 0;
+ SetDecodeTargetIsActive(sid, tid, active);
+ }
+ }
+}
+
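+// In the Dtis() strings below, each character is one decode target in the
+// same order as decode_target_protected_by_chain: 'S' = kSwitch,
+// 'D' = kDiscardable, 'R' = kRequired and '-' = kNotPresent.
+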
+FrameDependencyStructure ScalabilityStructureL1T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 1;
+ structure.decode_target_protected_by_chain = {0, 0};
+ structure.templates.resize(3);
+ structure.templates[0].T(0).Dtis("SS").ChainDiffs({0});
+ structure.templates[1].T(0).Dtis("SS").ChainDiffs({2}).FrameDiffs({2});
+ structure.templates[2].T(1).Dtis("-D").ChainDiffs({1}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL1T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 3;
+ structure.num_chains = 1;
+ structure.decode_target_protected_by_chain = {0, 0, 0};
+ structure.templates.resize(5);
+ structure.templates[0].T(0).Dtis("SSS").ChainDiffs({0});
+ structure.templates[1].T(0).Dtis("SSS").ChainDiffs({4}).FrameDiffs({4});
+ structure.templates[2].T(1).Dtis("-DS").ChainDiffs({2}).FrameDiffs({2});
+ structure.templates[3].T(2).Dtis("--D").ChainDiffs({1}).FrameDiffs({1});
+ structure.templates[4].T(2).Dtis("--D").ChainDiffs({3}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 1};
+ structure.templates.resize(4);
+ structure.templates[0].S(0).Dtis("SR").ChainDiffs({2, 1}).FrameDiffs({2});
+ structure.templates[1].S(0).Dtis("SS").ChainDiffs({0, 0});
+ structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({2, 1});
+ structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ structure.templates.resize(6);
+ auto& templates = structure.templates;
+ templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SSRR").ChainDiffs({4, 3}).FrameDiffs({4});
+ templates[2].S(0).T(1).Dtis("-D-R").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({4, 1});
+ templates[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2, 1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1};
+ auto& t = structure.templates;
+ t.resize(10);
+ t[1].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0});
+ t[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({1});
+ t[3].S(0).T(2).Dtis("--D--R").ChainDiffs({2, 1}).FrameDiffs({2});
+ t[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2, 1});
+ t[2].S(0).T(1).Dtis("-DS-RR").ChainDiffs({4, 3}).FrameDiffs({4});
+ t[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4, 1});
+ t[4].S(0).T(2).Dtis("--D--R").ChainDiffs({6, 5}).FrameDiffs({2});
+ t[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2, 1});
+ t[0].S(0).T(0).Dtis("SSSRRR").ChainDiffs({8, 7}).FrameDiffs({8});
+ t[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({8, 1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL3T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 3;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 1, 2};
+ auto& templates = structure.templates;
+ templates.resize(6);
+ templates[0].S(0).Dtis("SRR").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ templates[1].S(0).Dtis("SSS").ChainDiffs({0, 0, 0});
+ templates[2].S(1).Dtis("-SR").ChainDiffs({1, 1, 1}).FrameDiffs({3, 1});
+ templates[3].S(1).Dtis("-SS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ templates[4].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({3, 1});
+ templates[5].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL3T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1, 2, 2};
+ auto& t = structure.templates;
+ t.resize(9);
+  // Templates are listed in the order in which frames using them appear in
+  // the stream, but within the `structure.templates` array they are sorted
+  // by (`spatial_id`, `temporal_id`), as the dependency descriptor requires.
+ t[1].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0, 0});
+ t[4].S(1).T(0).Dtis("--SSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[7].S(2).T(0).Dtis("----SS").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[2].S(0).T(1).Dtis("-D-R-R").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[5].S(1).T(1).Dtis("---D-R").ChainDiffs({4, 3, 2}).FrameDiffs({3, 1});
+ t[8].S(2).T(1).Dtis("-----D").ChainDiffs({5, 4, 3}).FrameDiffs({3, 1});
+ t[0].S(0).T(0).Dtis("SSRRRR").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[3].S(1).T(0).Dtis("--SSRR").ChainDiffs({1, 1, 1}).FrameDiffs({6, 1});
+ t[6].S(2).T(0).Dtis("----SS").ChainDiffs({2, 1, 1}).FrameDiffs({6, 1});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureL3T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 9;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2};
+ auto& t = structure.templates;
+ t.resize(15);
+  // Templates are listed in the order in which frames using them appear in
+  // the stream, but within the `structure.templates` array they are sorted
+  // by (`spatial_id`, `temporal_id`), as the dependency descriptor requires.
+  // Indexes are written in hex for nicer alignment.
+ t[0x1].S(0).T(0).Dtis("SSSSSSSSS").ChainDiffs({0, 0, 0});
+ t[0x6].S(1).T(0).Dtis("---SSSSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[0x3].S(0).T(2).Dtis("--D--R--R").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[0x8].S(1).T(2).Dtis("-----D--R").ChainDiffs({4, 3, 2}).FrameDiffs({3, 1});
+ t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3, 1});
+ t[0x2].S(0).T(1).Dtis("-DS-RR-RR").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[0x7].S(1).T(1).Dtis("----DS-RR").ChainDiffs({7, 6, 5}).FrameDiffs({6, 1});
+ t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6, 1});
+ t[0x4].S(0).T(2).Dtis("--D--R--R").ChainDiffs({9, 8, 7}).FrameDiffs({3});
+ t[0x9].S(1).T(2).Dtis("-----D--R").ChainDiffs({10, 9, 8}).FrameDiffs({3, 1});
+ t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3, 1});
+ t[0x0].S(0).T(0).Dtis("SSSRRRRRR").ChainDiffs({12, 11, 10}).FrameDiffs({12});
+ t[0x5].S(1).T(0).Dtis("---SSSRRR").ChainDiffs({1, 1, 1}).FrameDiffs({12, 1});
+ t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({12, 1});
+ return structure;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h
new file mode 100644
index 0000000000..a4ede69342
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_
+
+#include <bitset>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+class ScalabilityStructureFullSvc : public ScalableVideoController {
+ public:
+ struct ScalingFactor {
+ int num = 1;
+ int den = 2;
+ };
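+  // With the default num/den = 1/2, each spatial layer below the top one is
+  // encoded at half the width and height of the layer above it.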
+ ScalabilityStructureFullSvc(int num_spatial_layers,
+ int num_temporal_layers,
+ ScalingFactor resolution_factor);
+ ~ScalabilityStructureFullSvc() override;
+
+ StreamLayersConfig StreamConfig() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern {
+ kNone,
+ kKey,
+ kDeltaT2A,
+ kDeltaT1,
+ kDeltaT2B,
+ kDeltaT0,
+ };
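+  // With all temporal layers active the pattern is expected to cycle as
+  // Key, DeltaT2A, DeltaT1, DeltaT2B, DeltaT0, DeltaT2A, ...
+  // (cf. NextPattern()).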
+ static constexpr absl::string_view kFramePatternNames[] = {
+ "None", "Key", "DeltaT2A", "DeltaT1", "DeltaT2B", "DeltaT0"};
+ static constexpr int kMaxNumSpatialLayers = 3;
+ static constexpr int kMaxNumTemporalLayers = 3;
+
+  // Index of the buffer that stores the last frame of layer (`sid`, `tid`).
+ int BufferIndex(int sid, int tid) const {
+ return tid * num_spatial_layers_ + sid;
+ }
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * num_temporal_layers_ + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * num_temporal_layers_ + tid, value);
+ }
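+  // Note the two layouts differ: buffers are indexed temporal-major, e.g.
+  // with 3 spatial and 3 temporal layers (sid=1, tid=2) maps to buffer
+  // 2 * 3 + 1 = 7, while decode targets are indexed spatial-major, so the
+  // same layer maps to decode target 1 * 3 + 2 = 5.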
+ FramePattern NextPattern() const;
+ bool TemporalLayerIsActive(int tid) const;
+ static DecodeTargetIndication Dti(int sid,
+ int tid,
+ const LayerFrameConfig& frame);
+
+ const int num_spatial_layers_;
+ const int num_temporal_layers_;
+ const ScalingFactor resolution_factor_;
+
+ FramePattern last_pattern_ = kNone;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t0_frame_for_spatial_id_ = 0;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t1_frame_for_spatial_id_ = 0;
+ std::bitset<32> active_decode_targets_;
+};
+
+// T1 0 0
+// / / / ...
+// T0 0---0---0--
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL1T2 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL1T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(1, 2, resolution_factor) {}
+ ~ScalabilityStructureL1T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// T2 0 0 0 0
+// | / | /
+// T1 / 0 / 0 ...
+// |_/ |_/
+// T0 0-------0------
+// Time-> 0 1 2 3 4 5 6 7
+class ScalabilityStructureL1T3 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL1T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(1, 3, resolution_factor) {}
+ ~ScalabilityStructureL1T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1 0--0--0-
+// | | | ...
+// S0 0--0--0-
+class ScalabilityStructureL2T1 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL2T1(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(2, 1, resolution_factor) {}
+ ~ScalabilityStructureL2T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T1 0 0
+// /| /| /
+// S1T0 0-+-0-+-0
+// | | | | | ...
+// S0T1 | 0 | 0 |
+// |/ |/ |/
+// S0T0 0---0---0--
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL2T2 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL2T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(2, 2, resolution_factor) {}
+ ~ScalabilityStructureL2T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T2 4 ,8
+// S1T1 / | 6' |
+// S1T0 2--+-'+--+-...
+// | | | |
+// S0T2 | 3 | ,7
+// S0T1 | / 5'
+// S0T0 1----'-----...
+// Time-> 0 1 2 3
+class ScalabilityStructureL2T3 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL2T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(2, 3, resolution_factor) {}
+ ~ScalabilityStructureL2T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S2 0-0-0-
+// | | |
+// S1 0-0-0-...
+// | | |
+// S0 0-0-0-
+// Time-> 0 1 2
+class ScalabilityStructureL3T1 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL3T1(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(3, 1, resolution_factor) {}
+ ~ScalabilityStructureL3T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// https://www.w3.org/TR/webrtc-svc/#L3T2*
+class ScalabilityStructureL3T2 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL3T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(3, 2, resolution_factor) {}
+ ~ScalabilityStructureL3T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// https://www.w3.org/TR/webrtc-svc/#L3T3*
+class ScalabilityStructureL3T3 : public ScalabilityStructureFullSvc {
+ public:
+ explicit ScalabilityStructureL3T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureFullSvc(3, 3, resolution_factor) {}
+ ~ScalabilityStructureL3T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc
new file mode 100644
index 0000000000..1c0a8be8f1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_full_svc.h"
+
+#include <vector>
+
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+TEST(ScalabilityStructureL3T3Test, SkipT0FrameByEncoderKeepsReferencesValid) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ // Only S0T0 decode target is enabled.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0));
+ // Encoder generates S0T0 key frame.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(1));
+  // Spatial layer 1 is enabled.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/1));
+ // Encoder tries to generate S0T0 and S1T0 delta frames but they are dropped.
+ structure.NextFrameConfig(/*restart=*/false);
+ // Encoder successfully generates S0T0 and S1T0 delta frames.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(3));
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3Test, SkipS1T1FrameKeepsStructureValid) {
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ auto frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].temporal_id, 0);
+
+ frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].temporal_id, 2);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/0));
+ frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(1));
+ EXPECT_EQ(frames[0].temporal_id, 1);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+  // Rely on checks inside GenerateFrames that frame references are valid.
+ frames = wrapper.GenerateFrames(/*num_temporal_units=*/1);
+ EXPECT_THAT(frames, SizeIs(2));
+ EXPECT_EQ(frames[0].temporal_id, 2);
+}
+
+TEST(ScalabilityStructureL3T3Test, SkipT1FrameByEncoderKeepsReferencesValid) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+  // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+  // Simulate a T1 frame dropped by the encoder,
+ // i.e. retrieve config, but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+  // One more temporal unit (T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3Test,
+ SkippingFrameReusePreviousFrameConfiguration) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+  // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(6));
+ ASSERT_EQ(frames[0].temporal_id, 0);
+ ASSERT_EQ(frames[3].temporal_id, 2);
+
+ // Simulate a frame dropped by the encoder,
+ // i.e. retrieve config, but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+  // Two more temporal units; expect the temporal pattern to continue.
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(12));
+  // Expect the temporal pattern to continue as if there were no dropped
+  // frames.
+ EXPECT_EQ(frames[6].temporal_id, 1);
+ EXPECT_EQ(frames[9].temporal_id, 2);
+}
+
+TEST(ScalabilityStructureL3T3Test, SwitchSpatialLayerBeforeT1Frame) {
+ ScalabilityStructureL3T3 structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0));
+ EXPECT_THAT(wrapper.GenerateFrames(1), SizeIs(1));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2));
+ auto frames = wrapper.GenerateFrames(1);
+ ASSERT_THAT(frames, SizeIs(1));
+ EXPECT_THAT(frames[0].frame_diffs, IsEmpty());
+ EXPECT_EQ(frames[0].temporal_id, 0);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc
new file mode 100644
index 0000000000..0e6fecfae9
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_key_svc.h"
+
+#include <bitset>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+constexpr int ScalabilityStructureKeySvc::kMaxNumSpatialLayers;
+constexpr int ScalabilityStructureKeySvc::kMaxNumTemporalLayers;
+
+ScalabilityStructureKeySvc::ScalabilityStructureKeySvc(int num_spatial_layers,
+ int num_temporal_layers)
+ : num_spatial_layers_(num_spatial_layers),
+ num_temporal_layers_(num_temporal_layers),
+ active_decode_targets_(
+ (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) {
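+  // All decode targets start enabled: the mask above sets the low
+  // num_spatial_layers * num_temporal_layers bits.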
+  // There is no point in using this structure without spatial scalability.
+ RTC_DCHECK_GT(num_spatial_layers, 1);
+ RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers);
+ RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
+}
+
+ScalabilityStructureKeySvc::~ScalabilityStructureKeySvc() = default;
+
+ScalableVideoController::StreamLayersConfig
+ScalabilityStructureKeySvc::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = num_spatial_layers_;
+ result.num_temporal_layers = num_temporal_layers_;
+ result.scaling_factor_num[num_spatial_layers_ - 1] = 1;
+ result.scaling_factor_den[num_spatial_layers_ - 1] = 1;
+ for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) {
+ result.scaling_factor_num[sid - 1] = 1;
+ result.scaling_factor_den[sid - 1] = 2 * result.scaling_factor_den[sid];
+ }
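+  // E.g. with three spatial layers this yields scaling factors of
+  // 1/4, 1/2 and 1/1 for spatial layers 0, 1 and 2 respectively.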
+ result.uses_reference_scaling = true;
+ return result;
+}
+
+bool ScalabilityStructureKeySvc::TemporalLayerIsActive(int tid) const {
+ if (tid >= num_temporal_layers_) {
+ return false;
+ }
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (DecodeTargetIsActive(sid, tid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+DecodeTargetIndication ScalabilityStructureKeySvc::Dti(
+ int sid,
+ int tid,
+ const LayerFrameConfig& config) {
+ if (config.IsKeyframe() || config.Id() == kKey) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return sid < config.SpatialId() ? DecodeTargetIndication::kNotPresent
+ : DecodeTargetIndication::kSwitch;
+ }
+
+ if (sid != config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (tid == config.TemporalId() && tid > 0) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ return DecodeTargetIndication::kSwitch;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::KeyframeConfig() {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ absl::optional<int> spatial_dependency_buffer_id;
+ spatial_id_is_enabled_.reset();
+  // Disallow temporal references that cross T0 on higher temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(kKey).S(sid).T(0);
+
+ if (spatial_dependency_buffer_id) {
+ config.Reference(*spatial_dependency_buffer_id);
+ } else {
+ config.Keyframe();
+ }
+ config.Update(BufferIndex(sid, /*tid=*/0));
+
+ spatial_id_is_enabled_.set(sid);
+ spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/0);
+ }
+ return configs;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::T0Config() {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+  // Disallow temporal references that cross T0 on higher temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+ spatial_id_is_enabled_.reset(sid);
+ continue;
+ }
+ configs.emplace_back();
+ configs.back().Id(kDeltaT0).S(sid).T(0).ReferenceAndUpdate(
+ BufferIndex(sid, /*tid=*/0));
+ }
+ return configs;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::T1Config() {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/1)) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(kDeltaT1).S(sid).T(1).Reference(BufferIndex(sid, /*tid=*/0));
+ if (num_temporal_layers_ > 2) {
+ config.Update(BufferIndex(sid, /*tid=*/1));
+ }
+ }
+ return configs;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::T2Config(FramePattern pattern) {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(num_spatial_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/2)) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(pattern).S(sid).T(2);
+ if (can_reference_t1_frame_for_spatial_id_[sid]) {
+ config.Reference(BufferIndex(sid, /*tid=*/1));
+ } else {
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ }
+ }
+ return configs;
+}
+
+ScalabilityStructureKeySvc::FramePattern
+ScalabilityStructureKeySvc::NextPattern(FramePattern last_pattern) const {
+ switch (last_pattern) {
+ case kNone:
+ return kKey;
+ case kDeltaT2B:
+ return kDeltaT0;
+ case kDeltaT2A:
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ case kDeltaT1:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2B;
+ }
+ return kDeltaT0;
+ case kDeltaT0:
+ case kKey:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2A;
+ }
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kNone;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureKeySvc::NextFrameConfig(bool restart) {
+ if (active_decode_targets_.none()) {
+ last_pattern_ = kNone;
+ return {};
+ }
+
+ if (restart) {
+ last_pattern_ = kNone;
+ }
+
+ FramePattern current_pattern = NextPattern(last_pattern_);
+ switch (current_pattern) {
+ case kKey:
+ return KeyframeConfig();
+ case kDeltaT0:
+ return T0Config();
+ case kDeltaT1:
+ return T1Config();
+ case kDeltaT2A:
+ case kDeltaT2B:
+ return T2Config(current_pattern);
+ case kNone:
+ break;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return {};
+}
+
+GenericFrameInfo ScalabilityStructureKeySvc::OnEncodeDone(
+ const LayerFrameConfig& config) {
+  // When the encoder drops all frames of a temporal unit, it is better to
+  // reuse the old temporal pattern than to switch to the next one, so the
+  // switch to the next pattern is deferred here from `NextFrameConfig`.
+  // In particular, creating VP9 references relies on this behavior.
+ last_pattern_ = static_cast<FramePattern>(config.Id());
+ if (config.TemporalId() == 1) {
+ can_reference_t1_frame_for_spatial_id_.set(config.SpatialId());
+ }
+
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ frame_info.decode_target_indications.reserve(num_spatial_layers_ *
+ num_temporal_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
+ frame_info.part_of_chain.assign(num_spatial_layers_, false);
+ if (config.IsKeyframe() || config.Id() == kKey) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ for (int sid = config.SpatialId(); sid < num_spatial_layers_; ++sid) {
+ frame_info.part_of_chain[sid] = true;
+ }
+ } else if (config.TemporalId() == 0) {
+ frame_info.part_of_chain[config.SpatialId()] = true;
+ }
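+  // E.g. with three spatial layers, a key frame on spatial layer 1 is part
+  // of chains 1 and 2, while a delta T0 frame is only part of the chain of
+  // its own spatial layer.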
+ frame_info.active_decode_targets = active_decode_targets_;
+ return frame_info;
+}
+
+void ScalabilityStructureKeySvc::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+    // Enable/disable spatial layers independently.
+ bool active = bitrates.GetBitrate(sid, /*tid=*/0) > 0;
+ SetDecodeTargetIsActive(sid, /*tid=*/0, active);
+ if (!spatial_id_is_enabled_[sid] && active) {
+ // Key frame is required to reenable any spatial layer.
+ last_pattern_ = kNone;
+ }
+
+ for (int tid = 1; tid < num_temporal_layers_; ++tid) {
+      // To enable a temporal layer, bitrates for the lower temporal layers
+      // are required as well.
+ active = active && bitrates.GetBitrate(sid, tid) > 0;
+ SetDecodeTargetIsActive(sid, tid, active);
+ }
+ }
+}
+
+ScalabilityStructureL2T1Key::~ScalabilityStructureL2T1Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL2T1Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 1};
+ structure.templates.resize(4);
+ structure.templates[0].S(0).Dtis("S-").ChainDiffs({2, 1}).FrameDiffs({2});
+ structure.templates[1].S(0).Dtis("SS").ChainDiffs({0, 0});
+ structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 2}).FrameDiffs({2});
+ structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({1});
+ return structure;
+}
+
+ScalabilityStructureL2T2Key::~ScalabilityStructureL2T2Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL2T2Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ structure.templates.resize(6);
+ auto& templates = structure.templates;
+ templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SS--").ChainDiffs({4, 3}).FrameDiffs({4});
+ templates[2].S(0).T(1).Dtis("-D--").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 4}).FrameDiffs({4});
+ templates[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2});
+ return structure;
+}
+
+ScalabilityStructureL2T3Key::~ScalabilityStructureL2T3Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL2T3Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1};
+ auto& templates = structure.templates;
+ templates.resize(10);
+ templates[0].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SSS---").ChainDiffs({8, 7}).FrameDiffs({8});
+ templates[2].S(0).T(1).Dtis("-DS---").ChainDiffs({4, 3}).FrameDiffs({4});
+ templates[3].S(0).T(2).Dtis("--D---").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[4].S(0).T(2).Dtis("--D---").ChainDiffs({6, 5}).FrameDiffs({2});
+ templates[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 8}).FrameDiffs({8});
+ templates[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4});
+ templates[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2});
+ templates[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2});
+ return structure;
+}
+
+ScalabilityStructureL3T1Key::~ScalabilityStructureL3T1Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL3T1Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 3;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 1, 2};
+ auto& t = structure.templates;
+ t.resize(6);
+  // Templates are listed in the order in which frames using them appear in
+  // the stream, but within the `structure.templates` array they are sorted
+  // by (`spatial_id`, `temporal_id`), as the dependency descriptor requires.
+ t[1].S(0).Dtis("SSS").ChainDiffs({0, 0, 0});
+ t[3].S(1).Dtis("-SS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[5].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[0].S(0).Dtis("S--").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[2].S(1).Dtis("-S-").ChainDiffs({1, 3, 2}).FrameDiffs({3});
+ t[4].S(2).Dtis("--S").ChainDiffs({2, 1, 3}).FrameDiffs({3});
+ return structure;
+}
+
+ScalabilityStructureL3T2Key::~ScalabilityStructureL3T2Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL3T2Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1, 2, 2};
+ auto& t = structure.templates;
+ t.resize(9);
+  // Templates are listed in the order in which frames using them appear in
+  // the stream, but within the `structure.templates` array they are sorted
+  // by (`spatial_id`, `temporal_id`), as the dependency descriptor requires.
+ t[1].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0, 0});
+ t[4].S(1).T(0).Dtis("--SSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[7].S(2).T(0).Dtis("----SS").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[2].S(0).T(1).Dtis("-D----").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[5].S(1).T(1).Dtis("---D--").ChainDiffs({4, 3, 2}).FrameDiffs({3});
+ t[8].S(2).T(1).Dtis("-----D").ChainDiffs({5, 4, 3}).FrameDiffs({3});
+ t[0].S(0).T(0).Dtis("SS----").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[3].S(1).T(0).Dtis("--SS--").ChainDiffs({1, 6, 5}).FrameDiffs({6});
+ t[6].S(2).T(0).Dtis("----SS").ChainDiffs({2, 1, 6}).FrameDiffs({6});
+ return structure;
+}
+
+ScalabilityStructureL3T3Key::~ScalabilityStructureL3T3Key() = default;
+
+FrameDependencyStructure ScalabilityStructureL3T3Key::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 9;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2};
+ auto& t = structure.templates;
+ t.resize(15);
+  // Templates are listed in the order in which frames using them appear in
+  // the stream, but within the `structure.templates` array they are sorted
+  // by (`spatial_id`, `temporal_id`), as the dependency descriptor requires.
+  // Indexes are written in hex for nicer alignment.
+ t[0x0].S(0).T(0).Dtis("SSSSSSSSS").ChainDiffs({0, 0, 0});
+ t[0x5].S(1).T(0).Dtis("---SSSSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1});
+ t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({1});
+ t[0x3].S(0).T(2).Dtis("--D------").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[0x8].S(1).T(2).Dtis("-----D---").ChainDiffs({4, 3, 2}).FrameDiffs({3});
+ t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3});
+ t[0x2].S(0).T(1).Dtis("-DS------").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[0x7].S(1).T(1).Dtis("----DS---").ChainDiffs({7, 6, 5}).FrameDiffs({6});
+ t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6});
+ t[0x4].S(0).T(2).Dtis("--D------").ChainDiffs({9, 8, 7}).FrameDiffs({3});
+ t[0x9].S(1).T(2).Dtis("-----D---").ChainDiffs({10, 9, 8}).FrameDiffs({3});
+ t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3});
+ t[0x1].S(0).T(0).Dtis("SSS------").ChainDiffs({12, 11, 10}).FrameDiffs({12});
+ t[0x6].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 12, 11}).FrameDiffs({12});
+ t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 12}).FrameDiffs({12});
+ return structure;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h
new file mode 100644
index 0000000000..54760da431
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_
+
+#include <bitset>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+class ScalabilityStructureKeySvc : public ScalableVideoController {
+ public:
+ ScalabilityStructureKeySvc(int num_spatial_layers, int num_temporal_layers);
+ ~ScalabilityStructureKeySvc() override;
+
+ StreamLayersConfig StreamConfig() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern : int {
+ kNone,
+ kKey,
+ kDeltaT0,
+ kDeltaT2A,
+ kDeltaT1,
+ kDeltaT2B,
+ };
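+  // With all temporal layers active the pattern cycles as
+  // Key, DeltaT2A, DeltaT1, DeltaT2B, DeltaT0, DeltaT2A, ...
+  // (see NextPattern()).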
+ static constexpr int kMaxNumSpatialLayers = 3;
+ static constexpr int kMaxNumTemporalLayers = 3;
+
+  // Index of the buffer that stores the last frame of layer (`sid`, `tid`).
+ int BufferIndex(int sid, int tid) const {
+ return tid * num_spatial_layers_ + sid;
+ }
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * num_temporal_layers_ + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * num_temporal_layers_ + tid, value);
+ }
+ bool TemporalLayerIsActive(int tid) const;
+ static DecodeTargetIndication Dti(int sid,
+ int tid,
+ const LayerFrameConfig& config);
+
+ std::vector<LayerFrameConfig> KeyframeConfig();
+ std::vector<LayerFrameConfig> T0Config();
+ std::vector<LayerFrameConfig> T1Config();
+ std::vector<LayerFrameConfig> T2Config(FramePattern pattern);
+
+ FramePattern NextPattern(FramePattern last_pattern) const;
+
+ const int num_spatial_layers_;
+ const int num_temporal_layers_;
+
+ FramePattern last_pattern_ = kNone;
+ std::bitset<kMaxNumSpatialLayers> spatial_id_is_enabled_;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t1_frame_for_spatial_id_;
+ std::bitset<32> active_decode_targets_;
+};
+
+// S1 0--0--0-
+// | ...
+// S0 0--0--0-
+class ScalabilityStructureL2T1Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL2T1Key() : ScalabilityStructureKeySvc(2, 1) {}
+ ~ScalabilityStructureL2T1Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T1 0 0
+// / / /
+// S1T0 0---0---0
+// | ...
+// S0T1 | 0 0
+// |/ / /
+// S0T0 0---0---0
+// Time-> 0 1 2 3 4
+class ScalabilityStructureL2T2Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL2T2Key() : ScalabilityStructureKeySvc(2, 2) {}
+ ~ScalabilityStructureL2T2Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureL2T3Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL2T3Key() : ScalabilityStructureKeySvc(2, 3) {}
+ ~ScalabilityStructureL2T3Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureL3T1Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL3T1Key() : ScalabilityStructureKeySvc(3, 1) {}
+ ~ScalabilityStructureL3T1Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureL3T2Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL3T2Key() : ScalabilityStructureKeySvc(3, 2) {}
+ ~ScalabilityStructureL3T2Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureL3T3Key : public ScalabilityStructureKeySvc {
+ public:
+ ScalabilityStructureL3T3Key() : ScalabilityStructureKeySvc(3, 3) {}
+ ~ScalabilityStructureL3T3Key() override;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
new file mode 100644
index 0000000000..5f923bb487
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_key_svc.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+TEST(ScalabilityStructureL3T3KeyTest,
+     SkippingT1FrameOnOneSpatialLayerKeepsStructureValid) {
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(5));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3));
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ ASSERT_THAT(frames, SizeIs(7));
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 2);
+ EXPECT_EQ(frames[3].temporal_id, 2);
+ EXPECT_EQ(frames[4].temporal_id, 1);
+ EXPECT_EQ(frames[5].temporal_id, 2);
+ EXPECT_EQ(frames[6].temporal_id, 2);
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkipT1FrameByEncoderKeepsReferencesValid) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+  // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+  // Simulate a T1 frame dropped by the encoder,
+ // i.e. retrieve config, but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+ // one more temporal unit.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+
+ EXPECT_THAT(frames, SizeIs(9));
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkippingFrameReusePreviousFrameConfiguration) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+  // First two temporal units (T0 and T2).
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(6));
+ ASSERT_EQ(frames[0].temporal_id, 0);
+ ASSERT_EQ(frames[3].temporal_id, 2);
+
+ // Simulate a frame dropped by the encoder,
+ // i.e. retrieve config, but skip calling OnEncodeDone.
+ structure.NextFrameConfig(/*restart=*/false);
+  // Two more temporal units; expect the temporal pattern to continue.
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(12));
+  // Expect the temporal pattern to continue as if there were no dropped
+  // frames.
+ EXPECT_EQ(frames[6].temporal_id, 1);
+ EXPECT_EQ(frames[9].temporal_id, 2);
+}
+
+TEST(ScalabilityStructureL3T3KeyTest, SkippingKeyFrameTriggersNewKeyFrame) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+  // Ask for a key frame config, but do not return any frames.
+ structure.NextFrameConfig(/*restart=*/false);
+
+ // Ask for more frames, expect they start with a key frame.
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(6));
+ ASSERT_EQ(frames[0].temporal_id, 0);
+ ASSERT_EQ(frames[3].temporal_id, 2);
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ SkippingT2FrameAndDisablingT2LayerProduceT1AsNextFrame) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+  // Ask for the next (T2) frame config, but do not return any frames.
+ auto config = structure.NextFrameConfig(/*restart=*/false);
+ ASSERT_THAT(config, Not(IsEmpty()));
+ ASSERT_EQ(config.front().TemporalId(), 2);
+
+  // Disable the T2 layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+  // Expect that a T1 config is generated instead of reusing the unused one.
+ config = structure.NextFrameConfig(/*restart=*/false);
+ ASSERT_THAT(config, Not(IsEmpty()));
+ EXPECT_EQ(config.front().TemporalId(), 1);
+}
+
+TEST(ScalabilityStructureL3T3KeyTest, EnableT2LayerWhileProducingT1Frame) {
+ std::vector<GenericFrameInfo> frames;
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+
+  // Disable the T2 layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+
+ // Generate the key frame.
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ ASSERT_THAT(frames, SizeIs(3));
+ EXPECT_EQ(frames[0].temporal_id, 0);
+
+  // Ask for the next (T1) frame config, but do not return any frames yet.
+ auto config = structure.NextFrameConfig(/*restart=*/false);
+ ASSERT_THAT(config, Not(IsEmpty()));
+ ASSERT_EQ(config.front().TemporalId(), 1);
+
+  // Reenable the T2 layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3, /*s2=*/3));
+
+ // Finish encoding previously requested config.
+ for (auto layer_config : config) {
+ GenericFrameInfo info = structure.OnEncodeDone(layer_config);
+ EXPECT_EQ(info.temporal_id, 1);
+ frames.push_back(info);
+ }
+ ASSERT_THAT(frames, SizeIs(6));
+
+ // Generate more frames, expect T2 pattern resumes.
+ wrapper.GenerateFrames(/*num_temporal_units=*/4, frames);
+ ASSERT_THAT(frames, SizeIs(18));
+ EXPECT_EQ(frames[6].temporal_id, 2);
+ EXPECT_EQ(frames[9].temporal_id, 0);
+ EXPECT_EQ(frames[12].temporal_id, 2);
+ EXPECT_EQ(frames[15].temporal_id, 1);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest,
+ ReenablingSpatialLayerBeforeMissedT0FrameDoesntTriggerAKeyFrame) {
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2));
+ wrapper.GenerateFrames(1, frames);
+ EXPECT_THAT(frames, SizeIs(2));
+ // Drop a spatial layer.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0));
+ wrapper.GenerateFrames(1, frames);
+ EXPECT_THAT(frames, SizeIs(3));
+ // Reenable a spatial layer before T0 frame is encoded.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2));
+ wrapper.GenerateFrames(1, frames);
+ EXPECT_THAT(frames, SizeIs(5));
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 1);
+ EXPECT_EQ(frames[3].temporal_id, 0);
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_THAT(frames[3].frame_diffs, SizeIs(1));
+ EXPECT_THAT(frames[4].frame_diffs, SizeIs(1));
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL3T3KeyTest, ReenablingSpatialLayerTriggersKeyFrame) {
+ ScalabilityStructureL3T3Key structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ // Start with all spatial layers enabled.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+ wrapper.GenerateFrames(3, frames);
+ EXPECT_THAT(frames, SizeIs(9));
+  // Drop a spatial layer. The two remaining spatial layers should just
+  // continue.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0, /*s2=*/2));
+ wrapper.GenerateFrames(2, frames);
+ EXPECT_THAT(frames, SizeIs(13));
+ // Reenable spatial layer, expect a full restart.
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2));
+ wrapper.GenerateFrames(1, frames);
+ ASSERT_THAT(frames, SizeIs(16));
+
+ // First 3 temporal units with all spatial layers enabled.
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+ // 2 temporal units with spatial layer 1 disabled.
+ EXPECT_EQ(frames[9].spatial_id, 0);
+ EXPECT_EQ(frames[9].temporal_id, 1);
+ EXPECT_EQ(frames[10].spatial_id, 2);
+ EXPECT_EQ(frames[10].temporal_id, 1);
+  // T0 frames were encoded while spatial layer 1 was disabled.
+ EXPECT_EQ(frames[11].spatial_id, 0);
+ EXPECT_EQ(frames[11].temporal_id, 0);
+ EXPECT_EQ(frames[12].spatial_id, 2);
+ EXPECT_EQ(frames[12].temporal_id, 0);
+ // Key frame to reenable spatial layer 1.
+ EXPECT_THAT(frames[13].frame_diffs, IsEmpty());
+ EXPECT_THAT(frames[14].frame_diffs, ElementsAre(1));
+ EXPECT_THAT(frames[15].frame_diffs, ElementsAre(1));
+ EXPECT_EQ(frames[13].temporal_id, 0);
+ EXPECT_EQ(frames[14].temporal_id, 0);
+ EXPECT_EQ(frames[15].temporal_id, 0);
+ auto all_frames = rtc::MakeArrayView(frames.data(), frames.size());
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(0, 13)));
+  // Frames starting from frame #13 should not reference any earlier frames.
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(13)));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc
new file mode 100644
index 0000000000..4d15942d3e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/base/macros.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+DecodeTargetIndication
+Dti(int sid, int tid, const ScalableVideoController::LayerFrameConfig& config) {
+ if (config.IsKeyframe()) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return sid < config.SpatialId() ? DecodeTargetIndication::kNotPresent
+ : DecodeTargetIndication::kSwitch;
+ }
+
+ if (sid != config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (tid == config.TemporalId() && tid > 0) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ return DecodeTargetIndication::kSwitch;
+}
+
+} // namespace
+
+constexpr int ScalabilityStructureL2T2KeyShift::kNumSpatialLayers;
+constexpr int ScalabilityStructureL2T2KeyShift::kNumTemporalLayers;
+
+ScalabilityStructureL2T2KeyShift::~ScalabilityStructureL2T2KeyShift() = default;
+
+ScalableVideoController::StreamLayersConfig
+ScalabilityStructureL2T2KeyShift::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = 2;
+ result.num_temporal_layers = 2;
+ result.scaling_factor_num[0] = 1;
+ result.scaling_factor_den[0] = 2;
+ result.uses_reference_scaling = true;
+ return result;
+}
+
+FrameDependencyStructure ScalabilityStructureL2T2KeyShift::DependencyStructure()
+ const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ structure.templates.resize(7);
+ auto& templates = structure.templates;
+ templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0});
+ templates[1].S(0).T(0).Dtis("SS--").ChainDiffs({2, 1}).FrameDiffs({2});
+ templates[2].S(0).T(0).Dtis("SS--").ChainDiffs({4, 1}).FrameDiffs({4});
+ templates[3].S(0).T(1).Dtis("-D--").ChainDiffs({2, 3}).FrameDiffs({2});
+ templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1});
+ templates[5].S(1).T(0).Dtis("--SS").ChainDiffs({3, 4}).FrameDiffs({4});
+ templates[6].S(1).T(1).Dtis("---D").ChainDiffs({1, 2}).FrameDiffs({2});
+ return structure;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureL2T2KeyShift::NextFrameConfig(bool restart) {
+ std::vector<LayerFrameConfig> configs;
+ configs.reserve(2);
+ if (restart) {
+ next_pattern_ = kKey;
+ }
+
+ // Buffer0 keeps latest S0T0 frame,
+ // Buffer1 keeps latest S1T0 frame.
+ switch (next_pattern_) {
+ case kKey:
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(0).T(0).Update(0).Keyframe();
+ }
+ if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(1).T(0).Update(1);
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.back().Reference(0);
+ } else {
+ configs.back().Keyframe();
+ }
+ }
+ next_pattern_ = kDelta0;
+ break;
+ case kDelta0:
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(0).T(0).ReferenceAndUpdate(0);
+ }
+ if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/1)) {
+ configs.emplace_back();
+ configs.back().S(1).T(1).Reference(1);
+ }
+ if (configs.empty() && DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(1).T(0).ReferenceAndUpdate(1);
+ }
+ next_pattern_ = kDelta1;
+ break;
+ case kDelta1:
+ if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/1)) {
+ configs.emplace_back();
+ configs.back().S(0).T(1).Reference(0);
+ }
+ if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(1).T(0).ReferenceAndUpdate(1);
+ }
+ if (configs.empty() && DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) {
+ configs.emplace_back();
+ configs.back().S(0).T(0).ReferenceAndUpdate(0);
+ }
+ next_pattern_ = kDelta0;
+ break;
+ }
+
+ RTC_DCHECK(!configs.empty() || active_decode_targets_.none());
+ return configs;
+}
+
+GenericFrameInfo ScalabilityStructureL2T2KeyShift::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ for (int sid = 0; sid < kNumSpatialLayers; ++sid) {
+ for (int tid = 0; tid < kNumTemporalLayers; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
+ if (config.IsKeyframe()) {
+ frame_info.part_of_chain = {true, true};
+ } else if (config.TemporalId() == 0) {
+ frame_info.part_of_chain = {config.SpatialId() == 0,
+ config.SpatialId() == 1};
+ } else {
+ frame_info.part_of_chain = {false, false};
+ }
+ return frame_info;
+}
+
+void ScalabilityStructureL2T2KeyShift::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < kNumSpatialLayers; ++sid) {
+    // Enable/disable spatial layers independently.
+ bool active = bitrates.GetBitrate(sid, /*tid=*/0) > 0;
+ if (!DecodeTargetIsActive(sid, /*tid=*/0) && active) {
+ // Key frame is required to reenable any spatial layer.
+ next_pattern_ = kKey;
+ }
+
+ SetDecodeTargetIsActive(sid, /*tid=*/0, active);
+ SetDecodeTargetIsActive(sid, /*tid=*/1,
+ active && bitrates.GetBitrate(sid, /*tid=*/1) > 0);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h
new file mode 100644
index 0000000000..26d1afcb29
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_
+
+#include <bitset>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// S1T1 0 0
+// / / /
+// S1T0 0---0---0
+// | ...
+// S0T1 | 0 0
+// | / /
+// S0T0 0-0---0--
+// Time-> 0 1 2 3 4
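+//
+// Compared to L2T2Key, the temporal pattern of S0 is shifted by one frame
+// relative to S1, so the T0 frames of the two spatial layers land in
+// different temporal units, spreading the typically larger T0 frames over
+// time.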
+class ScalabilityStructureL2T2KeyShift : public ScalableVideoController {
+ public:
+ ~ScalabilityStructureL2T2KeyShift() override;
+
+ StreamLayersConfig StreamConfig() const override;
+ FrameDependencyStructure DependencyStructure() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern {
+ kKey,
+ kDelta0,
+ kDelta1,
+ };
+
+ static constexpr int kNumSpatialLayers = 2;
+ static constexpr int kNumTemporalLayers = 2;
+
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * kNumTemporalLayers + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * kNumTemporalLayers + tid, value);
+ }
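+  // Bit layout of `active_decode_targets_`: bit 0 = S0T0, bit 1 = S0T1,
+  // bit 2 = S1T0, bit 3 = S1T1; the default 0b1111 below enables all four.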
+
+ FramePattern next_pattern_ = kKey;
+ std::bitset<32> active_decode_targets_ = 0b1111;
+};
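+
+// Illustrative usage sketch (not part of this change): an encoder wrapper
+// would typically drive the controller roughly like this, where
+// `EncodeLayerFrame` is a hypothetical helper that runs the codec for one
+// layer frame.
+//
+//   ScalabilityStructureL2T2KeyShift controller;
+//   controller.OnRatesUpdated(bitrates);
+//   for (auto& config : controller.NextFrameConfig(/*restart=*/false)) {
+//     EncodeLayerFrame(config);  // Hypothetical encode step.
+//     GenericFrameInfo info = controller.OnEncodeDone(config);
+//     // `info` carries the metadata used by the dependency descriptor.
+//   }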
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
new file mode 100644
index 0000000000..40fecf1812
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+// S1T1 3 7
+// / /
+// S1T0 1---5---9
+// |
+// S0T1 | 4 8
+// | / /
+// S0T0 0-2---6
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, DecodeTargetsAreEnabledByDefault) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+ wrapper.GenerateFrames(/*num_temporal_units=*/5, frames);
+ ASSERT_THAT(frames, SizeIs(10));
+
+ EXPECT_EQ(frames[0].spatial_id, 0);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 0);
+ EXPECT_EQ(frames[3].spatial_id, 1);
+ EXPECT_EQ(frames[4].spatial_id, 0);
+ EXPECT_EQ(frames[5].spatial_id, 1);
+ EXPECT_EQ(frames[6].spatial_id, 0);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+ EXPECT_EQ(frames[8].spatial_id, 0);
+ EXPECT_EQ(frames[9].spatial_id, 1);
+
+ // spatial_id = 0 has the temporal shift.
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+ EXPECT_EQ(frames[4].temporal_id, 1);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+ EXPECT_EQ(frames[8].temporal_id, 1);
+
+  // spatial_id = 1 has no temporal shift.
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[7].temporal_id, 1);
+ EXPECT_EQ(frames[9].temporal_id, 0);
+
+ // Key frame diff.
+ EXPECT_THAT(frames[0].frame_diffs, IsEmpty());
+ EXPECT_THAT(frames[1].frame_diffs, ElementsAre(1));
+ // S0T0 frame diffs
+ EXPECT_THAT(frames[2].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[6].frame_diffs, ElementsAre(4));
+ // S1T0 frame diffs
+ EXPECT_THAT(frames[5].frame_diffs, ElementsAre(4));
+ EXPECT_THAT(frames[9].frame_diffs, ElementsAre(4));
+  // T1 frames reference the T0 frame of the same spatial layer, which is
+  // 2 frame ids away.
+ EXPECT_THAT(frames[3].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[4].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[7].frame_diffs, ElementsAre(2));
+ EXPECT_THAT(frames[8].frame_diffs, ElementsAre(2));
+}
+
+// S1T0 1---4---7
+// |
+// S0T1 | 3 6
+// | / /
+// S0T0 0-2---5--
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS1T1Layer) {
+ ScalabilityStructureL2T2KeyShift structure;
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/1));
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+ wrapper.GenerateFrames(/*num_temporal_units=*/5, frames);
+ ASSERT_THAT(frames, SizeIs(8));
+
+ EXPECT_EQ(frames[0].spatial_id, 0);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 0);
+ EXPECT_EQ(frames[3].spatial_id, 0);
+ EXPECT_EQ(frames[4].spatial_id, 1);
+ EXPECT_EQ(frames[5].spatial_id, 0);
+ EXPECT_EQ(frames[6].spatial_id, 0);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+
+ // spatial_id = 0 has the temporal shift.
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[6].temporal_id, 1);
+
+  // spatial_id = 1 has a single temporal layer.
+  EXPECT_EQ(frames[1].temporal_id, 0);
+  EXPECT_EQ(frames[4].temporal_id, 0);
+  EXPECT_EQ(frames[7].temporal_id, 0);
+}
+
+// S1T1 3 |
+// / |
+// S1T0 1---5+--7
+// | |
+// S0T1 | 4|
+// | / |
+// S0T0 0-2--+6---8
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableT1LayersAfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ EXPECT_THAT(frames, SizeIs(6));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(9));
+
+ // Skip validation before T1 was disabled as that is covered by the test
+ // where no layers are disabled.
+ EXPECT_EQ(frames[6].spatial_id, 0);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+ EXPECT_EQ(frames[8].spatial_id, 0);
+
+ EXPECT_EQ(frames[6].temporal_id, 0);
+ EXPECT_EQ(frames[7].temporal_id, 0);
+ EXPECT_EQ(frames[8].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 1 3
+// / /
+// S1T0 0---2
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS0FromTheStart) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2));
+ wrapper.GenerateFrames(/*num_temporal_units=*/4, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+
+ EXPECT_EQ(frames[0].spatial_id, 1);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 1);
+ EXPECT_EQ(frames[3].spatial_id, 1);
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 1);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+ EXPECT_EQ(frames[3].temporal_id, 1);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3 |6 8
+// / / /
+// S1T0 1---5+--7
+// | |
+// S0T1 | 4|
+// | / |
+// S0T0 0-2 |
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS0AfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ EXPECT_THAT(frames, SizeIs(6));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(9));
+
+  // Expect frame[6] to be a delta frame.
+  EXPECT_THAT(frames[6].frame_diffs, ElementsAre(1));
+  // Skip validation before S0 was disabled as that is covered by the test
+  // where no layers are disabled.
+ EXPECT_EQ(frames[6].spatial_id, 1);
+ EXPECT_EQ(frames[7].spatial_id, 1);
+ EXPECT_EQ(frames[8].spatial_id, 1);
+
+ EXPECT_EQ(frames[6].temporal_id, 1);
+ EXPECT_EQ(frames[7].temporal_id, 0);
+ EXPECT_EQ(frames[8].temporal_id, 1);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3| | 8
+// / | | /
+// S1T0 1 | |6
+// | | ||
+// S0T1 | |4||
+// | / ||
+// S0T0 0-2| |5-7
+// Time-> 0 1 2 3 4 5
+TEST(ScalabilityStructureL2T2KeyShiftTest, ReenableS1TriggersKeyFrame) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0));
+ wrapper.GenerateFrames(/*num_temporal_units=*/1, frames);
+ EXPECT_THAT(frames, SizeIs(5));
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2));
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ ASSERT_THAT(frames, SizeIs(9));
+
+ EXPECT_THAT(frames[4].spatial_id, 0);
+ EXPECT_THAT(frames[4].temporal_id, 1);
+
+ // Expect frame[5] to be a key frame.
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(
+ rtc::MakeArrayView(frames.data() + 5, 4)));
+
+ EXPECT_THAT(frames[5].spatial_id, 0);
+ EXPECT_THAT(frames[6].spatial_id, 1);
+ EXPECT_THAT(frames[7].spatial_id, 0);
+ EXPECT_THAT(frames[8].spatial_id, 1);
+
+ // S0 should do temporal shift after the key frame.
+ EXPECT_THAT(frames[5].temporal_id, 0);
+ EXPECT_THAT(frames[7].temporal_id, 0);
+
+ // No temporal shift for the top spatial layer.
+ EXPECT_THAT(frames[6].temporal_id, 0);
+ EXPECT_THAT(frames[8].temporal_id, 1);
+}
+
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS0T0FromTheStart) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(3));
+
+ EXPECT_EQ(frames[0].spatial_id, 0);
+ EXPECT_EQ(frames[1].spatial_id, 0);
+ EXPECT_EQ(frames[2].spatial_id, 0);
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3|
+// / |
+// S1T0 1 |
+// | |
+// S0T1 | |
+// | |
+// S0T0 0-2+4-5-6
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS0T0AfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(7));
+
+ EXPECT_EQ(frames[4].spatial_id, 0);
+ EXPECT_EQ(frames[5].spatial_id, 0);
+ EXPECT_EQ(frames[6].spatial_id, 0);
+
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS1T0FromTheStart) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(3));
+
+ EXPECT_EQ(frames[0].spatial_id, 1);
+ EXPECT_EQ(frames[1].spatial_id, 1);
+ EXPECT_EQ(frames[2].spatial_id, 1);
+
+ EXPECT_EQ(frames[0].temporal_id, 0);
+ EXPECT_EQ(frames[1].temporal_id, 0);
+ EXPECT_EQ(frames[2].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+// S1T1 3|
+// / |
+// S1T0 1--+4-5-6
+// | |
+// S0T1 | |
+// | |
+// S0T0 0-2|
+// Time-> 0 1 2 3 4
+TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS1T0AfterFewFrames) {
+ ScalabilityStructureL2T2KeyShift structure;
+ ScalabilityStructureWrapper wrapper(structure);
+ std::vector<GenericFrameInfo> frames;
+
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames);
+ EXPECT_THAT(frames, SizeIs(4));
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/1));
+ wrapper.GenerateFrames(/*num_temporal_units=*/3, frames);
+ ASSERT_THAT(frames, SizeIs(7));
+
+ EXPECT_EQ(frames[4].spatial_id, 1);
+ EXPECT_EQ(frames[5].spatial_id, 1);
+ EXPECT_EQ(frames[6].spatial_id, 1);
+
+ EXPECT_EQ(frames[4].temporal_id, 0);
+ EXPECT_EQ(frames[5].temporal_id, 0);
+ EXPECT_EQ(frames[6].temporal_id, 0);
+
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc
new file mode 100644
index 0000000000..54e27fda5c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_simulcast.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/base/macros.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+DecodeTargetIndication
+Dti(int sid, int tid, const ScalableVideoController::LayerFrameConfig& config) {
+ if (sid != config.SpatialId() || tid < config.TemporalId()) {
+ return DecodeTargetIndication::kNotPresent;
+ }
+ if (tid == 0) {
+ RTC_DCHECK_EQ(config.TemporalId(), 0);
+ return DecodeTargetIndication::kSwitch;
+ }
+ if (tid == config.TemporalId()) {
+ return DecodeTargetIndication::kDiscardable;
+ }
+ RTC_DCHECK_GT(tid, config.TemporalId());
+ return DecodeTargetIndication::kSwitch;
+}
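+// In other words: a simulcast frame is only present in decode targets of its
+// own spatial layer. Within that layer, a T0 frame is a switch point for
+// every temporal target, a frame is discardable for the target matching its
+// own (non-zero) temporal id, and higher temporal targets can switch
+// through it.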
+
+} // namespace
+
+constexpr int ScalabilityStructureSimulcast::kMaxNumSpatialLayers;
+constexpr int ScalabilityStructureSimulcast::kMaxNumTemporalLayers;
+
+ScalabilityStructureSimulcast::ScalabilityStructureSimulcast(
+ int num_spatial_layers,
+ int num_temporal_layers,
+ ScalingFactor resolution_factor)
+ : num_spatial_layers_(num_spatial_layers),
+ num_temporal_layers_(num_temporal_layers),
+ resolution_factor_(resolution_factor),
+ active_decode_targets_(
+ (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) {
+ RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers);
+ RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers);
+}
+
+ScalabilityStructureSimulcast::~ScalabilityStructureSimulcast() = default;
+
+ScalableVideoController::StreamLayersConfig
+ScalabilityStructureSimulcast::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = num_spatial_layers_;
+ result.num_temporal_layers = num_temporal_layers_;
+ result.scaling_factor_num[num_spatial_layers_ - 1] = 1;
+ result.scaling_factor_den[num_spatial_layers_ - 1] = 1;
+ for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) {
+ result.scaling_factor_num[sid - 1] =
+ resolution_factor_.num * result.scaling_factor_num[sid];
+ result.scaling_factor_den[sid - 1] =
+ resolution_factor_.den * result.scaling_factor_den[sid];
+ }
+ result.uses_reference_scaling = false;
+ return result;
+}
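+// For example, with the default ScalingFactor{num = 1, den = 2} and three
+// spatial layers, the loop in StreamConfig() yields scaling factors 1/4,
+// 1/2 and 1/1 for S0, S1 and S2 respectively.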
+
+bool ScalabilityStructureSimulcast::TemporalLayerIsActive(int tid) const {
+ if (tid >= num_temporal_layers_) {
+ return false;
+ }
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (DecodeTargetIsActive(sid, tid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+ScalabilityStructureSimulcast::FramePattern
+ScalabilityStructureSimulcast::NextPattern() const {
+ switch (last_pattern_) {
+ case kNone:
+ case kDeltaT2B:
+ return kDeltaT0;
+ case kDeltaT2A:
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ case kDeltaT1:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2B;
+ }
+ return kDeltaT0;
+ case kDeltaT0:
+ if (TemporalLayerIsActive(2)) {
+ return kDeltaT2A;
+ }
+ if (TemporalLayerIsActive(1)) {
+ return kDeltaT1;
+ }
+ return kDeltaT0;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return kDeltaT0;
+}
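+// With all three temporal layers active, NextPattern() above cycles through
+// kDeltaT0, kDeltaT2A, kDeltaT1, kDeltaT2B, producing the usual
+// T0-T2-T1-T2 temporal cadence.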
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalabilityStructureSimulcast::NextFrameConfig(bool restart) {
+ std::vector<LayerFrameConfig> configs;
+ if (active_decode_targets_.none()) {
+ last_pattern_ = kNone;
+ return configs;
+ }
+ configs.reserve(num_spatial_layers_);
+
+ if (last_pattern_ == kNone || restart) {
+ can_reference_t0_frame_for_spatial_id_.reset();
+ last_pattern_ = kNone;
+ }
+ FramePattern current_pattern = NextPattern();
+
+ switch (current_pattern) {
+ case kDeltaT0:
+      // Disallow temporal references that cross a T0 frame on higher
+      // temporal layers.
+ can_reference_t1_frame_for_spatial_id_.reset();
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/0)) {
+          // The next frame from spatial layer `sid` shouldn't depend on a
+          // potentially old previous frame from that spatial layer.
+ can_reference_t0_frame_for_spatial_id_.reset(sid);
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(0);
+
+ if (can_reference_t0_frame_for_spatial_id_[sid]) {
+ config.ReferenceAndUpdate(BufferIndex(sid, /*tid=*/0));
+ } else {
+ config.Keyframe().Update(BufferIndex(sid, /*tid=*/0));
+ }
+ can_reference_t0_frame_for_spatial_id_.set(sid);
+ }
+ break;
+ case kDeltaT1:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/1) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern)
+ .S(sid)
+ .T(1)
+ .Reference(BufferIndex(sid, /*tid=*/0));
+ // Save frame only if there is a higher temporal layer that may need it.
+ if (num_temporal_layers_ > 2) {
+ config.Update(BufferIndex(sid, /*tid=*/1));
+ }
+ }
+ break;
+ case kDeltaT2A:
+ case kDeltaT2B:
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ if (!DecodeTargetIsActive(sid, /*tid=*/2) ||
+ !can_reference_t0_frame_for_spatial_id_[sid]) {
+ continue;
+ }
+ configs.emplace_back();
+ ScalableVideoController::LayerFrameConfig& config = configs.back();
+ config.Id(current_pattern).S(sid).T(2);
+ if (can_reference_t1_frame_for_spatial_id_[sid]) {
+ config.Reference(BufferIndex(sid, /*tid=*/1));
+ } else {
+ config.Reference(BufferIndex(sid, /*tid=*/0));
+ }
+ }
+ break;
+ case kNone:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ return configs;
+}
+
+GenericFrameInfo ScalabilityStructureSimulcast::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ last_pattern_ = static_cast<FramePattern>(config.Id());
+ if (config.TemporalId() == 1) {
+ can_reference_t1_frame_for_spatial_id_.set(config.SpatialId());
+ }
+ GenericFrameInfo frame_info;
+ frame_info.spatial_id = config.SpatialId();
+ frame_info.temporal_id = config.TemporalId();
+ frame_info.encoder_buffers = config.Buffers();
+ frame_info.decode_target_indications.reserve(num_spatial_layers_ *
+ num_temporal_layers_);
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+ frame_info.decode_target_indications.push_back(Dti(sid, tid, config));
+ }
+ }
+ frame_info.part_of_chain.assign(num_spatial_layers_, false);
+ if (config.TemporalId() == 0) {
+ frame_info.part_of_chain[config.SpatialId()] = true;
+ }
+ frame_info.active_decode_targets = active_decode_targets_;
+ return frame_info;
+}
+
+void ScalabilityStructureSimulcast::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ for (int sid = 0; sid < num_spatial_layers_; ++sid) {
+    // Enable/disable spatial layers independently.
+ bool active = true;
+ for (int tid = 0; tid < num_temporal_layers_; ++tid) {
+      // To enable a temporal layer, bitrates for all lower temporal layers
+      // are required as well.
+ active = active && bitrates.GetBitrate(sid, tid) > 0;
+ SetDecodeTargetIsActive(sid, tid, active);
+ }
+ }
+}
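+// Note that in OnRatesUpdated() above, `active` latches false: e.g. an
+// allocation with a non-zero S0T0 bitrate, a zero S0T1 bitrate and a
+// non-zero S0T2 bitrate still deactivates both S0T1 and S0T2, since a
+// temporal layer cannot run without the layers below it.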
+
+FrameDependencyStructure ScalabilityStructureS2T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 2;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 1};
+ structure.templates.resize(4);
+ structure.templates[0].S(0).Dtis("S-").ChainDiffs({2, 1}).FrameDiffs({2});
+ structure.templates[1].S(0).Dtis("S-").ChainDiffs({0, 0});
+ structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 2}).FrameDiffs({2});
+ structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 0});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureS2T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 4;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1};
+ auto& t = structure.templates;
+ t.resize(6);
+ t[1].S(0).T(0).Dtis("SS--").ChainDiffs({0, 0});
+ t[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 0});
+ t[2].S(0).T(1).Dtis("-D--").ChainDiffs({2, 1}).FrameDiffs({2});
+ t[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2});
+ t[0].S(0).T(0).Dtis("SS--").ChainDiffs({4, 3}).FrameDiffs({4});
+ t[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 4}).FrameDiffs({4});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureS2T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 2;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1};
+ auto& t = structure.templates;
+ t.resize(10);
+ t[1].S(0).T(0).Dtis("SSS---").ChainDiffs({0, 0});
+ t[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 0});
+ t[3].S(0).T(2).Dtis("--D---").ChainDiffs({2, 1}).FrameDiffs({2});
+ t[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2});
+ t[2].S(0).T(1).Dtis("-DS---").ChainDiffs({4, 3}).FrameDiffs({4});
+ t[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4});
+ t[4].S(0).T(2).Dtis("--D---").ChainDiffs({6, 5}).FrameDiffs({2});
+ t[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2});
+ t[0].S(0).T(0).Dtis("SSS---").ChainDiffs({8, 7}).FrameDiffs({8});
+ t[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 8}).FrameDiffs({8});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureS3T1::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 3;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 1, 2};
+ auto& t = structure.templates;
+ t.resize(6);
+ t[1].S(0).T(0).Dtis("S--").ChainDiffs({0, 0, 0});
+ t[3].S(1).T(0).Dtis("-S-").ChainDiffs({1, 0, 0});
+ t[5].S(2).T(0).Dtis("--S").ChainDiffs({2, 1, 0});
+ t[0].S(0).T(0).Dtis("S--").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[2].S(1).T(0).Dtis("-S-").ChainDiffs({1, 3, 2}).FrameDiffs({3});
+ t[4].S(2).T(0).Dtis("--S").ChainDiffs({2, 1, 3}).FrameDiffs({3});
+ return structure;
+}
+
+FrameDependencyStructure ScalabilityStructureS3T2::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 6;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 1, 1, 2, 2};
+ auto& t = structure.templates;
+ t.resize(9);
+ // Templates are shown in the order frames following them appear in the
+ // stream, but in `structure.templates` array templates are sorted by
+ // (`spatial_id`, `temporal_id`) since that is a dependency descriptor
+ // requirement.
+ t[1].S(0).T(0).Dtis("SS----").ChainDiffs({0, 0, 0});
+ t[4].S(1).T(0).Dtis("--SS--").ChainDiffs({1, 0, 0});
+ t[7].S(2).T(0).Dtis("----SS").ChainDiffs({2, 1, 0});
+ t[2].S(0).T(1).Dtis("-D----").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[5].S(1).T(1).Dtis("---D--").ChainDiffs({4, 3, 2}).FrameDiffs({3});
+ t[8].S(2).T(1).Dtis("-----D").ChainDiffs({5, 4, 3}).FrameDiffs({3});
+ t[0].S(0).T(0).Dtis("SS----").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[3].S(1).T(0).Dtis("--SS--").ChainDiffs({1, 6, 5}).FrameDiffs({6});
+ t[6].S(2).T(0).Dtis("----SS").ChainDiffs({2, 1, 6}).FrameDiffs({6});
+ return structure;
+}
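+// For example, for ScalabilityStructureS3T2 the stream emits frames matching
+// t[1], t[4], t[7] (key frames), then t[2], t[5], t[8] (T1), then the
+// steady-state T0 templates t[0], t[3], t[6], while the array itself stays
+// sorted by (spatial_id, temporal_id).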
+
+FrameDependencyStructure ScalabilityStructureS3T3::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 9;
+ structure.num_chains = 3;
+ structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2};
+ auto& t = structure.templates;
+ t.resize(15);
+ // Templates are shown in the order frames following them appear in the
+ // stream, but in `structure.templates` array templates are sorted by
+ // (`spatial_id`, `temporal_id`) since that is a dependency descriptor
+ // requirement. Indexes are written in hex for nicer alignment.
+ t[0x1].S(0).T(0).Dtis("SSS------").ChainDiffs({0, 0, 0});
+ t[0x6].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 0, 0});
+ t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 0});
+ t[0x3].S(0).T(2).Dtis("--D------").ChainDiffs({3, 2, 1}).FrameDiffs({3});
+ t[0x8].S(1).T(2).Dtis("-----D---").ChainDiffs({4, 3, 2}).FrameDiffs({3});
+ t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3});
+ t[0x2].S(0).T(1).Dtis("-DS------").ChainDiffs({6, 5, 4}).FrameDiffs({6});
+ t[0x7].S(1).T(1).Dtis("----DS---").ChainDiffs({7, 6, 5}).FrameDiffs({6});
+ t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6});
+ t[0x4].S(0).T(2).Dtis("--D------").ChainDiffs({9, 8, 7}).FrameDiffs({3});
+ t[0x9].S(1).T(2).Dtis("-----D---").ChainDiffs({10, 9, 8}).FrameDiffs({3});
+ t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3});
+ t[0x0].S(0).T(0).Dtis("SSS------").ChainDiffs({12, 11, 10}).FrameDiffs({12});
+ t[0x5].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 12, 11}).FrameDiffs({12});
+ t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 12}).FrameDiffs({12});
+ return structure;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h
new file mode 100644
index 0000000000..99be9f0d58
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_
+
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Scalability structure with multiple independent spatial layers each with the
+// same temporal layering.
+class ScalabilityStructureSimulcast : public ScalableVideoController {
+ public:
+ struct ScalingFactor {
+ int num = 1;
+ int den = 2;
+ };
+ ScalabilityStructureSimulcast(int num_spatial_layers,
+ int num_temporal_layers,
+ ScalingFactor resolution_factor);
+ ~ScalabilityStructureSimulcast() override;
+
+ StreamLayersConfig StreamConfig() const override;
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ enum FramePattern {
+ kNone,
+ kDeltaT2A,
+ kDeltaT1,
+ kDeltaT2B,
+ kDeltaT0,
+ };
+ static constexpr int kMaxNumSpatialLayers = 3;
+ static constexpr int kMaxNumTemporalLayers = 3;
+
+  // Index of the buffer used to store the last frame of layer (`sid`, `tid`).
+ int BufferIndex(int sid, int tid) const {
+ return tid * num_spatial_layers_ + sid;
+ }
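+  // E.g. with num_spatial_layers_ == 2, BufferIndex() maps
+  // (sid=0,tid=0)->0, (sid=1,tid=0)->1, (sid=0,tid=1)->2, (sid=1,tid=1)->3,
+  // giving each layer its own codec buffer.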
+ bool DecodeTargetIsActive(int sid, int tid) const {
+ return active_decode_targets_[sid * num_temporal_layers_ + tid];
+ }
+ void SetDecodeTargetIsActive(int sid, int tid, bool value) {
+ active_decode_targets_.set(sid * num_temporal_layers_ + tid, value);
+ }
+ FramePattern NextPattern() const;
+ bool TemporalLayerIsActive(int tid) const;
+
+ const int num_spatial_layers_;
+ const int num_temporal_layers_;
+ const ScalingFactor resolution_factor_;
+
+ FramePattern last_pattern_ = kNone;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t0_frame_for_spatial_id_ = 0;
+ std::bitset<kMaxNumSpatialLayers> can_reference_t1_frame_for_spatial_id_ = 0;
+ std::bitset<32> active_decode_targets_;
+};
+
+// S1 0--0--0-
+// ...
+// S0 0--0--0-
+class ScalabilityStructureS2T1 : public ScalabilityStructureSimulcast {
+ public:
+ explicit ScalabilityStructureS2T1(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureSimulcast(2, 1, resolution_factor) {}
+ ~ScalabilityStructureS2T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureS2T2 : public ScalabilityStructureSimulcast {
+ public:
+ explicit ScalabilityStructureS2T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureSimulcast(2, 2, resolution_factor) {}
+ ~ScalabilityStructureS2T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+// S1T2 3 7
+// | /
+// S1T1 / 5
+// |_/
+// S1T0 1-------9...
+//
+// S0T2 2 6
+// | /
+// S0T1 / 4
+// |_/
+// S0T0 0-------8...
+// Time-> 0 1 2 3 4
+class ScalabilityStructureS2T3 : public ScalabilityStructureSimulcast {
+ public:
+ explicit ScalabilityStructureS2T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureSimulcast(2, 3, resolution_factor) {}
+ ~ScalabilityStructureS2T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureS3T1 : public ScalabilityStructureSimulcast {
+ public:
+ explicit ScalabilityStructureS3T1(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureSimulcast(3, 1, resolution_factor) {}
+ ~ScalabilityStructureS3T1() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureS3T2 : public ScalabilityStructureSimulcast {
+ public:
+ explicit ScalabilityStructureS3T2(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureSimulcast(3, 2, resolution_factor) {}
+ ~ScalabilityStructureS3T2() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+class ScalabilityStructureS3T3 : public ScalabilityStructureSimulcast {
+ public:
+ explicit ScalabilityStructureS3T3(ScalingFactor resolution_factor = {})
+ : ScalabilityStructureSimulcast(3, 3, resolution_factor) {}
+ ~ScalabilityStructureS3T3() override = default;
+
+ FrameDependencyStructure DependencyStructure() const override;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc
new file mode 100644
index 0000000000..aeb4d88f1a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "modules/video_coding/chain_diff_calculator.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+VideoBitrateAllocation EnableTemporalLayers(int s0, int s1, int s2) {
+ VideoBitrateAllocation bitrate;
+ for (int tid = 0; tid < s0; ++tid) {
+ bitrate.SetBitrate(0, tid, 1'000'000);
+ }
+ for (int tid = 0; tid < s1; ++tid) {
+ bitrate.SetBitrate(1, tid, 1'000'000);
+ }
+ for (int tid = 0; tid < s2; ++tid) {
+ bitrate.SetBitrate(2, tid, 1'000'000);
+ }
+ return bitrate;
+}
+
+void ScalabilityStructureWrapper::GenerateFrames(
+ int num_temporal_units,
+ std::vector<GenericFrameInfo>& frames) {
+ for (int i = 0; i < num_temporal_units; ++i) {
+ for (auto& layer_frame :
+ structure_controller_.NextFrameConfig(/*restart=*/false)) {
+ int64_t frame_id = ++frame_id_;
+ bool is_keyframe = layer_frame.IsKeyframe();
+
+ GenericFrameInfo frame_info =
+ structure_controller_.OnEncodeDone(layer_frame);
+ if (is_keyframe) {
+ chain_diff_calculator_.Reset(frame_info.part_of_chain);
+ }
+ frame_info.chain_diffs =
+ chain_diff_calculator_.From(frame_id, frame_info.part_of_chain);
+ for (int64_t base_frame_id : frame_deps_calculator_.FromBuffersUsage(
+ frame_id, frame_info.encoder_buffers)) {
+ frame_info.frame_diffs.push_back(frame_id - base_frame_id);
+ }
+
+ frames.push_back(std::move(frame_info));
+ }
+ }
+}
+
+bool ScalabilityStructureWrapper::FrameReferencesAreValid(
+ rtc::ArrayView<const GenericFrameInfo> frames) const {
+ bool valid = true;
+  // VP9 and AV1 support up to 8 buffers. Expect no other buffers are used.
+ std::bitset<8> buffer_contains_frame;
+ for (size_t i = 0; i < frames.size(); ++i) {
+ const GenericFrameInfo& frame = frames[i];
+ for (const CodecBufferUsage& buffer_usage : frame.encoder_buffers) {
+ if (buffer_usage.id < 0 || buffer_usage.id >= 8) {
+ ADD_FAILURE() << "Invalid buffer id " << buffer_usage.id
+ << " for frame#" << i
+ << ". Up to 8 buffers are supported.";
+ valid = false;
+ continue;
+ }
+ if (buffer_usage.referenced && !buffer_contains_frame[buffer_usage.id]) {
+ ADD_FAILURE() << "buffer " << buffer_usage.id << " for frame#" << i
+ << " was reference before updated.";
+ valid = false;
+ }
+ if (buffer_usage.updated) {
+ buffer_contains_frame.set(buffer_usage.id);
+ }
+ }
+ for (int fdiff : frame.frame_diffs) {
+ if (fdiff <= 0 || static_cast<size_t>(fdiff) > i) {
+ ADD_FAILURE() << "Invalid frame diff " << fdiff << " for frame#" << i;
+ valid = false;
+ }
+ }
+ }
+ return valid;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h
new file mode 100644
index 0000000000..d183be4766
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_test_helpers.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/chain_diff_calculator.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+// Creates bitrate allocation with non-zero bitrate for given number of temporal
+// layers for each spatial layer.
+VideoBitrateAllocation EnableTemporalLayers(int s0, int s1 = 0, int s2 = 0);
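+// For example, EnableTemporalLayers(/*s0=*/2, /*s1=*/1) activates both
+// temporal layers of spatial layer 0 but only T0 of spatial layer 1.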
+
+class ScalabilityStructureWrapper {
+ public:
+ explicit ScalabilityStructureWrapper(ScalableVideoController& structure)
+ : structure_controller_(structure) {}
+
+ std::vector<GenericFrameInfo> GenerateFrames(int num_temporal_units) {
+ std::vector<GenericFrameInfo> frames;
+ GenerateFrames(num_temporal_units, frames);
+ return frames;
+ }
+ void GenerateFrames(int num_temporal_units,
+ std::vector<GenericFrameInfo>& frames);
+
+  // Returns false and ADD_FAILUREs for frames with invalid references.
+  // In particular, validates that no frame references a frame before
+  // frames[0]. In error messages frames are indexed starting from 0.
+ bool FrameReferencesAreValid(
+ rtc::ArrayView<const GenericFrameInfo> frames) const;
+
+ private:
+ ScalableVideoController& structure_controller_;
+ FrameDependenciesCalculator frame_deps_calculator_;
+ ChainDiffCalculator chain_diff_calculator_;
+ int64_t frame_id_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc
new file mode 100644
index 0000000000..2d517c5825
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_unittest.cc
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalability_structure_test_helpers.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::Ge;
+using ::testing::IsEmpty;
+using ::testing::Le;
+using ::testing::Lt;
+using ::testing::Not;
+using ::testing::NotNull;
+using ::testing::SizeIs;
+using ::testing::TestWithParam;
+using ::testing::Values;
+
+std::string FrameDependencyTemplateToString(const FrameDependencyTemplate& t) {
+ rtc::StringBuilder sb;
+ sb << "S" << t.spatial_id << "T" << t.temporal_id;
+ sb << ": dtis = ";
+ for (const auto dtis : t.decode_target_indications) {
+ switch (dtis) {
+ case DecodeTargetIndication::kNotPresent:
+ sb << "-";
+ break;
+ case DecodeTargetIndication::kDiscardable:
+ sb << "D";
+ break;
+ case DecodeTargetIndication::kSwitch:
+ sb << "S";
+ break;
+ case DecodeTargetIndication::kRequired:
+ sb << "R";
+ break;
+ default:
+ sb << "?";
+ break;
+ }
+ }
+ sb << ", frame diffs = { ";
+ for (int d : t.frame_diffs) {
+ sb << d << ", ";
+ }
+ sb << "}, chain diffs = { ";
+ for (int d : t.chain_diffs) {
+ sb << d << ", ";
+ }
+ sb << "}";
+ return sb.Release();
+}
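+// E.g. a steady-state S1T0 simulcast template with chain diffs {1, 4} and a
+// frame diff of 4 prints as:
+//   "S1T0: dtis = --SS, frame diffs = { 4, }, chain diffs = { 1, 4, }"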
+
+struct SvcTestParam {
+ friend std::ostream& operator<<(std::ostream& os, const SvcTestParam& param) {
+ return os << param.name;
+ }
+
+ ScalabilityMode GetScalabilityMode() const {
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(name);
+ RTC_CHECK(scalability_mode.has_value());
+ return *scalability_mode;
+ }
+
+ std::string name;
+ int num_temporal_units;
+};
+
+class ScalabilityStructureTest : public TestWithParam<SvcTestParam> {};
+
+TEST_P(ScalabilityStructureTest,
+ StaticConfigMatchesConfigReturnedByController) {
+ std::unique_ptr<ScalableVideoController> controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ absl::optional<ScalableVideoController::StreamLayersConfig> static_config =
+ ScalabilityStructureConfig(GetParam().GetScalabilityMode());
+ ASSERT_THAT(controller, NotNull());
+ ASSERT_NE(static_config, absl::nullopt);
+ ScalableVideoController::StreamLayersConfig config =
+ controller->StreamConfig();
+ EXPECT_EQ(config.num_spatial_layers, static_config->num_spatial_layers);
+ EXPECT_EQ(config.num_temporal_layers, static_config->num_temporal_layers);
+ EXPECT_THAT(
+ rtc::MakeArrayView(config.scaling_factor_num, config.num_spatial_layers),
+ ElementsAreArray(static_config->scaling_factor_num,
+ static_config->num_spatial_layers));
+ EXPECT_THAT(
+ rtc::MakeArrayView(config.scaling_factor_den, config.num_spatial_layers),
+ ElementsAreArray(static_config->scaling_factor_den,
+ static_config->num_spatial_layers));
+}
+
+TEST_P(ScalabilityStructureTest,
+ NumberOfDecodeTargetsAndChainsAreInRangeAndConsistent) {
+ FrameDependencyStructure structure =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode())
+ ->DependencyStructure();
+ EXPECT_GT(structure.num_decode_targets, 0);
+ EXPECT_LE(structure.num_decode_targets,
+ DependencyDescriptor::kMaxDecodeTargets);
+ EXPECT_GE(structure.num_chains, 0);
+ EXPECT_LE(structure.num_chains, structure.num_decode_targets);
+ if (structure.num_chains == 0) {
+ EXPECT_THAT(structure.decode_target_protected_by_chain, IsEmpty());
+ } else {
+ EXPECT_THAT(structure.decode_target_protected_by_chain,
+ AllOf(SizeIs(structure.num_decode_targets), Each(Ge(0)),
+ Each(Lt(structure.num_chains))));
+ }
+ EXPECT_THAT(structure.templates,
+ SizeIs(Lt(size_t{DependencyDescriptor::kMaxTemplates})));
+}
+
+TEST_P(ScalabilityStructureTest, TemplatesAreSortedByLayerId) {
+ FrameDependencyStructure structure =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode())
+ ->DependencyStructure();
+ ASSERT_THAT(structure.templates, Not(IsEmpty()));
+ const auto& first_templates = structure.templates.front();
+ EXPECT_EQ(first_templates.spatial_id, 0);
+ EXPECT_EQ(first_templates.temporal_id, 0);
+ for (size_t i = 1; i < structure.templates.size(); ++i) {
+ const auto& prev_template = structure.templates[i - 1];
+ const auto& next_template = structure.templates[i];
+ if (next_template.spatial_id == prev_template.spatial_id &&
+ next_template.temporal_id == prev_template.temporal_id) {
+ // Same layer, next_layer_idc == 0
+ } else if (next_template.spatial_id == prev_template.spatial_id &&
+ next_template.temporal_id == prev_template.temporal_id + 1) {
+ // Next temporal layer, next_layer_idc == 1
+ } else if (next_template.spatial_id == prev_template.spatial_id + 1 &&
+ next_template.temporal_id == 0) {
+ // Next spatial layer, next_layer_idc == 2
+ } else {
+      // Everything else is invalid.
+ ADD_FAILURE() << "Invalid templates order. Template #" << i
+ << " with layer (" << next_template.spatial_id << ","
+ << next_template.temporal_id
+ << ") follows template with layer ("
+ << prev_template.spatial_id << ","
+ << prev_template.temporal_id << ").";
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, TemplatesMatchNumberOfDecodeTargetsAndChains) {
+ FrameDependencyStructure structure =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode())
+ ->DependencyStructure();
+ EXPECT_THAT(
+ structure.templates,
+ Each(AllOf(Field(&FrameDependencyTemplate::decode_target_indications,
+ SizeIs(structure.num_decode_targets)),
+ Field(&FrameDependencyTemplate::chain_diffs,
+ SizeIs(structure.num_chains)))));
+}
+
+TEST_P(ScalabilityStructureTest, FrameInfoMatchesFrameDependencyStructure) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ for (size_t frame_id = 0; frame_id < frame_infos.size(); ++frame_id) {
+ const auto& frame = frame_infos[frame_id];
+ EXPECT_GE(frame.spatial_id, 0) << " for frame " << frame_id;
+ EXPECT_GE(frame.temporal_id, 0) << " for frame " << frame_id;
+ EXPECT_THAT(frame.decode_target_indications,
+ SizeIs(structure.num_decode_targets))
+ << " for frame " << frame_id;
+ EXPECT_THAT(frame.part_of_chain, SizeIs(structure.num_chains))
+ << " for frame " << frame_id;
+ }
+}
+
+TEST_P(ScalabilityStructureTest, ThereIsAPerfectTemplateForEachFrame) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ for (size_t frame_id = 0; frame_id < frame_infos.size(); ++frame_id) {
+ EXPECT_THAT(structure.templates, Contains(frame_infos[frame_id]))
+ << " for frame " << frame_id << ", Expected "
+ << FrameDependencyTemplateToString(frame_infos[frame_id]);
+ }
+}
+
+TEST_P(ScalabilityStructureTest, FrameDependsOnSameOrLowerLayer) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ int64_t num_frames = frame_infos.size();
+
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) {
+ const auto& frame = frame_infos[frame_id];
+ for (int frame_diff : frame.frame_diffs) {
+ int64_t base_frame_id = frame_id - frame_diff;
+ const auto& base_frame = frame_infos[base_frame_id];
+ EXPECT_GE(frame.spatial_id, base_frame.spatial_id)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id;
+ EXPECT_GE(frame.temporal_id, base_frame.temporal_id)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id;
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, NoFrameDependsOnDiscardableOrNotPresent) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ int64_t num_frames = frame_infos.size();
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+
+ for (int dt = 0; dt < structure.num_decode_targets; ++dt) {
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) {
+ const auto& frame = frame_infos[frame_id];
+ if (frame.decode_target_indications[dt] ==
+ DecodeTargetIndication::kNotPresent) {
+ continue;
+ }
+ for (int frame_diff : frame.frame_diffs) {
+ int64_t base_frame_id = frame_id - frame_diff;
+ const auto& base_frame = frame_infos[base_frame_id];
+ EXPECT_NE(base_frame.decode_target_indications[dt],
+ DecodeTargetIndication::kNotPresent)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id
+ << " that is not part of decode target#" << dt;
+ EXPECT_NE(base_frame.decode_target_indications[dt],
+ DecodeTargetIndication::kDiscardable)
+ << "Frame " << frame_id << " depends on frame " << base_frame_id
+ << " that is discardable for decode target#" << dt;
+ }
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, NoFrameDependsThroughSwitchIndication) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ FrameDependencyStructure structure = svc_controller->DependencyStructure();
+ std::vector<GenericFrameInfo> frame_infos =
+ ScalabilityStructureWrapper(*svc_controller)
+ .GenerateFrames(GetParam().num_temporal_units);
+ int64_t num_frames = frame_infos.size();
+ std::vector<std::set<int64_t>> full_deps(num_frames);
+
+  // For each frame, calculate the set of all frames it depends on, both
+  // directly and indirectly.
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) {
+ std::set<int64_t> all_base_frames;
+ for (int frame_diff : frame_infos[frame_id].frame_diffs) {
+ int64_t base_frame_id = frame_id - frame_diff;
+ all_base_frames.insert(base_frame_id);
+ const auto& indirect = full_deps[base_frame_id];
+ all_base_frames.insert(indirect.begin(), indirect.end());
+ }
+ full_deps[frame_id] = std::move(all_base_frames);
+ }
+
+  // Now check the switch indication: frames after the switch indication
+  // mustn't depend on any additional frames before the switch indication.
+ for (int dt = 0; dt < structure.num_decode_targets; ++dt) {
+ for (int64_t switch_frame_id = 0; switch_frame_id < num_frames;
+ ++switch_frame_id) {
+ if (frame_infos[switch_frame_id].decode_target_indications[dt] !=
+ DecodeTargetIndication::kSwitch) {
+ continue;
+ }
+ for (int64_t later_frame_id = switch_frame_id + 1;
+ later_frame_id < num_frames; ++later_frame_id) {
+ if (frame_infos[later_frame_id].decode_target_indications[dt] ==
+ DecodeTargetIndication::kNotPresent) {
+ continue;
+ }
+ for (int frame_diff : frame_infos[later_frame_id].frame_diffs) {
+ int64_t early_frame_id = later_frame_id - frame_diff;
+ if (early_frame_id < switch_frame_id) {
+ EXPECT_THAT(full_deps[switch_frame_id], Contains(early_frame_id))
+ << "For decode target #" << dt << " frame " << later_frame_id
+ << " depends on the frame " << early_frame_id
+ << " that switch indication frame " << switch_frame_id
+ << " doesn't directly on indirectly depend on.";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_P(ScalabilityStructureTest, ProduceNoFrameForDisabledLayers) {
+ std::unique_ptr<ScalableVideoController> svc_controller =
+ CreateScalabilityStructure(GetParam().GetScalabilityMode());
+ ScalableVideoController::StreamLayersConfig structure =
+ svc_controller->StreamConfig();
+
+ VideoBitrateAllocation all_bitrates;
+ for (int sid = 0; sid < structure.num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < structure.num_temporal_layers; ++tid) {
+ all_bitrates.SetBitrate(sid, tid, 100'000);
+ }
+ }
+
+ svc_controller->OnRatesUpdated(all_bitrates);
+ ScalabilityStructureWrapper wrapper(*svc_controller);
+ std::vector<GenericFrameInfo> frames =
+ wrapper.GenerateFrames(GetParam().num_temporal_units);
+
+ for (int sid = 0; sid < structure.num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < structure.num_temporal_layers; ++tid) {
+      // While all layers were enabled, expect a frame for each layer.
+ EXPECT_THAT(frames,
+ Contains(AllOf(Field(&GenericFrameInfo::spatial_id, sid),
+ Field(&GenericFrameInfo::temporal_id, tid))))
+ << "For layer (" << sid << "," << tid << ")";
+      // Restore bitrates for all layers before disabling a single layer.
+ VideoBitrateAllocation bitrates = all_bitrates;
+ bitrates.SetBitrate(sid, tid, 0);
+ svc_controller->OnRatesUpdated(bitrates);
+ // With layer (sid, tid) disabled, expect no frames are produced for it.
+ EXPECT_THAT(
+ wrapper.GenerateFrames(GetParam().num_temporal_units),
+ Not(Contains(AllOf(Field(&GenericFrameInfo::spatial_id, sid),
+ Field(&GenericFrameInfo::temporal_id, tid)))))
+ << "For layer (" << sid << "," << tid << ")";
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ Svc,
+ ScalabilityStructureTest,
+ Values(SvcTestParam{"L1T1", /*num_temporal_units=*/3},
+ SvcTestParam{"L1T2", /*num_temporal_units=*/4},
+ SvcTestParam{"L1T3", /*num_temporal_units=*/8},
+ SvcTestParam{"L2T1", /*num_temporal_units=*/3},
+ SvcTestParam{"L2T1_KEY", /*num_temporal_units=*/3},
+ SvcTestParam{"L3T1", /*num_temporal_units=*/3},
+ SvcTestParam{"L3T1_KEY", /*num_temporal_units=*/3},
+ SvcTestParam{"L3T3", /*num_temporal_units=*/8},
+ SvcTestParam{"S2T1", /*num_temporal_units=*/3},
+ SvcTestParam{"S2T2", /*num_temporal_units=*/4},
+ SvcTestParam{"S2T3", /*num_temporal_units=*/8},
+ SvcTestParam{"S3T1", /*num_temporal_units=*/3},
+ SvcTestParam{"S3T2", /*num_temporal_units=*/4},
+ SvcTestParam{"S3T3", /*num_temporal_units=*/8},
+ SvcTestParam{"L2T2", /*num_temporal_units=*/4},
+ SvcTestParam{"L2T2_KEY", /*num_temporal_units=*/4},
+ SvcTestParam{"L2T2_KEY_SHIFT", /*num_temporal_units=*/4},
+ SvcTestParam{"L2T3", /*num_temporal_units=*/8},
+ SvcTestParam{"L2T3_KEY", /*num_temporal_units=*/8},
+ SvcTestParam{"L3T2", /*num_temporal_units=*/4},
+ SvcTestParam{"L3T2_KEY", /*num_temporal_units=*/4},
+ SvcTestParam{"L3T3_KEY", /*num_temporal_units=*/8}),
+ [](const testing::TestParamInfo<SvcTestParam>& info) {
+ return info.param.name;
+ });
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build
new file mode 100644
index 0000000000..a3e45b1cd7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalability_structures_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_key_svc.cc",
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_simulcast.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/create_scalability_structure.cc",
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_full_svc.cc",
+ "/third_party/libwebrtc/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("scalability_structures_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h
new file mode 100644
index 0000000000..c7362657ec
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_
+
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+
+namespace webrtc {
+
+// Controls how video should be encoded to be scalable. Outputs results as a
+// buffer usage configuration for the encoder, plus enough detail to
+// communicate the scalability structure via the dependency descriptor RTP
+// header extension.
+class ScalableVideoController {
+ public:
+ struct StreamLayersConfig {
+ int num_spatial_layers = 1;
+ int num_temporal_layers = 1;
+ // Indicates if frames can reference frames of a different resolution.
+ bool uses_reference_scaling = true;
+ // Spatial layer scaling. Frames with spatial_id = i are expected to be
+ // encoded at original_resolution * scaling_factor_num[i] / scaling_factor_den[i].
+ int scaling_factor_num[DependencyDescriptor::kMaxSpatialIds] = {1, 1, 1, 1};
+ int scaling_factor_den[DependencyDescriptor::kMaxSpatialIds] = {1, 1, 1, 1};
+ };
+ class LayerFrameConfig {
+ public:
+ // Builders/setters.
+ LayerFrameConfig& Id(int value);
+ LayerFrameConfig& Keyframe();
+ LayerFrameConfig& S(int value);
+ LayerFrameConfig& T(int value);
+ LayerFrameConfig& Reference(int buffer_id);
+ LayerFrameConfig& Update(int buffer_id);
+ LayerFrameConfig& ReferenceAndUpdate(int buffer_id);
+
+ // Getters.
+ int Id() const { return id_; }
+ bool IsKeyframe() const { return is_keyframe_; }
+ int SpatialId() const { return spatial_id_; }
+ int TemporalId() const { return temporal_id_; }
+ const absl::InlinedVector<CodecBufferUsage, kMaxEncoderBuffers>& Buffers()
+ const {
+ return buffers_;
+ }
+
+ private:
+ // Id to match the configuration returned by NextFrameConfig with the
+ // (possibly modified) configuration passed back via OnEncodeDone.
+ // The meaning of the id is an implementation detail of
+ // the ScalableVideoController.
+ int id_ = 0;
+
+ // Indicates the frame should be encoded as a key frame. In particular, when
+ // `is_keyframe=true`, the property `CodecBufferUsage::referenced` should be
+ // ignored and treated as false.
+ bool is_keyframe_ = false;
+
+ int spatial_id_ = 0;
+ int temporal_id_ = 0;
+ // Describes which buffers the encoder is allowed to reference and which
+ // buffers the encoder should update.
+ absl::InlinedVector<CodecBufferUsage, kMaxEncoderBuffers> buffers_;
+ };
+
+ virtual ~ScalableVideoController() = default;
+
+ // Returns video structure description for encoder to configure itself.
+ virtual StreamLayersConfig StreamConfig() const = 0;
+
+ // Returns video structure description in format compatible with
+ // dependency descriptor rtp header extension.
+ virtual FrameDependencyStructure DependencyStructure() const = 0;
+
+ // Notifies the controller of updated per-layer bitrates. In particular,
+ // this indicates when certain layers should be disabled.
+ // The controller shouldn't produce a LayerFrameConfig for disabled layers.
+ virtual void OnRatesUpdated(const VideoBitrateAllocation& bitrates) = 0;
+
+ // When `restart` is true, the first `LayerFrameConfig` should have
+ // `is_keyframe` set to true.
+ // The returned vector shouldn't be empty.
+ virtual std::vector<LayerFrameConfig> NextFrameConfig(bool restart) = 0;
+
+ // Returns configuration to pass to EncoderCallback.
+ virtual GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) = 0;
+};
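+
+// Editorial sketch (not part of the upstream header): a typical encode loop
+// is expected to drive this interface roughly as follows, where `controller`
+// and `EncodeWithBuffers` are hypothetical:
+//
+//   for (auto& config : controller->NextFrameConfig(/*restart=*/false)) {
+//     // Encode one layer frame, honoring the reference/update flags in
+//     // config.Buffers(), then report the config back to the controller.
+//     EncodeWithBuffers(config.SpatialId(), config.TemporalId(),
+//                       config.Buffers());
+//     GenericFrameInfo info = controller->OnEncodeDone(config);
+//     // `info` feeds the dependency descriptor RTP header extension.
+//   }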
+
+// Below are implementation details.
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Id(int value) {
+ id_ = value;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Keyframe() {
+ is_keyframe_ = true;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::S(int value) {
+ spatial_id_ = value;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::T(int value) {
+ temporal_id_ = value;
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Reference(int buffer_id) {
+ buffers_.emplace_back(buffer_id, /*referenced=*/true, /*updated=*/false);
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::Update(int buffer_id) {
+ buffers_.emplace_back(buffer_id, /*referenced=*/false, /*updated=*/true);
+ return *this;
+}
+inline ScalableVideoController::LayerFrameConfig&
+ScalableVideoController::LayerFrameConfig::ReferenceAndUpdate(int buffer_id) {
+ buffers_.emplace_back(buffer_id, /*referenced=*/true, /*updated=*/true);
+ return *this;
+}
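+
+// Editorial sketch (illustrative only): the setters above are meant to be
+// chained, so a delta frame that references buffer 0 and refreshes buffer 1
+// could be described as:
+//
+//   ScalableVideoController::LayerFrameConfig config;
+//   config.Id(1).S(0).T(1).Reference(0).Update(1);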
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build
new file mode 100644
index 0000000000..42b8307362
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("scalable_video_controller_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc
new file mode 100644
index 0000000000..a9d530dd9d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ScalableVideoControllerNoLayering::~ScalableVideoControllerNoLayering() =
+ default;
+
+ScalableVideoController::StreamLayersConfig
+ScalableVideoControllerNoLayering::StreamConfig() const {
+ StreamLayersConfig result;
+ result.num_spatial_layers = 1;
+ result.num_temporal_layers = 1;
+ result.uses_reference_scaling = false;
+ return result;
+}
+
+FrameDependencyStructure
+ScalableVideoControllerNoLayering::DependencyStructure() const {
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = 1;
+ structure.num_chains = 1;
+ structure.decode_target_protected_by_chain = {0};
+
+ FrameDependencyTemplate key_frame;
+ key_frame.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ key_frame.chain_diffs = {0};
+ structure.templates.push_back(key_frame);
+
+ FrameDependencyTemplate delta_frame;
+ delta_frame.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ delta_frame.chain_diffs = {1};
+ delta_frame.frame_diffs = {1};
+ structure.templates.push_back(delta_frame);
+
+ return structure;
+}
+
+std::vector<ScalableVideoController::LayerFrameConfig>
+ScalableVideoControllerNoLayering::NextFrameConfig(bool restart) {
+ if (!enabled_) {
+ return {};
+ }
+ std::vector<LayerFrameConfig> result(1);
+ if (restart || start_) {
+ result[0].Id(0).Keyframe().Update(0);
+ } else {
+ result[0].Id(0).ReferenceAndUpdate(0);
+ }
+ start_ = false;
+ return result;
+}
+
+GenericFrameInfo ScalableVideoControllerNoLayering::OnEncodeDone(
+ const LayerFrameConfig& config) {
+ RTC_DCHECK_EQ(config.Id(), 0);
+ GenericFrameInfo frame_info;
+ frame_info.encoder_buffers = config.Buffers();
+ if (config.IsKeyframe()) {
+ for (auto& buffer : frame_info.encoder_buffers) {
+ buffer.referenced = false;
+ }
+ }
+ frame_info.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ frame_info.part_of_chain = {true};
+ return frame_info;
+}
+
+void ScalableVideoControllerNoLayering::OnRatesUpdated(
+ const VideoBitrateAllocation& bitrates) {
+ enabled_ = bitrates.GetBitrate(0, 0) > 0;
+}
+
+} // namespace webrtc
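+
+// Editorial note (illustrative, not part of the upstream file): this
+// controller describes a plain single-layer stream. The first call to
+// NextFrameConfig() yields a key frame that only updates buffer 0; every
+// later call yields a delta frame that both references and refreshes it:
+//
+//   ScalableVideoControllerNoLayering controller;
+//   auto configs = controller.NextFrameConfig(/*restart=*/false);
+//   // configs[0].IsKeyframe() is true only for the very first frame.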
diff --git a/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h
new file mode 100644
index 0000000000..6d66b61c8b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/scalable_video_controller_no_layering.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_
+#define MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_
+
+#include <vector>
+
+#include "api/transport/rtp/dependency_descriptor.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "common_video/generic_frame_descriptor/generic_frame_info.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+
+namespace webrtc {
+
+class ScalableVideoControllerNoLayering : public ScalableVideoController {
+ public:
+ ~ScalableVideoControllerNoLayering() override;
+
+ StreamLayersConfig StreamConfig() const override;
+ FrameDependencyStructure DependencyStructure() const override;
+
+ std::vector<LayerFrameConfig> NextFrameConfig(bool restart) override;
+ GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override;
+ void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override;
+
+ private:
+ bool start_ = true;
+ bool enabled_ = true;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc
new file mode 100644
index 0000000000..b6ae0d7430
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <numeric>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kSpatialLayeringRateScalingFactor = 0.55f;
+constexpr float kTemporalLayeringRateScalingFactor = 0.55f;
+
+struct ActiveSpatialLayers {
+ size_t first = 0;
+ size_t num = 0;
+};
+
+ActiveSpatialLayers GetActiveSpatialLayers(const VideoCodec& codec,
+ size_t num_spatial_layers) {
+ ActiveSpatialLayers active;
+ for (active.first = 0; active.first < num_spatial_layers; ++active.first) {
+ if (codec.spatialLayers[active.first].active) {
+ break;
+ }
+ }
+
+ size_t last_active_layer = active.first;
+ for (; last_active_layer < num_spatial_layers; ++last_active_layer) {
+ if (!codec.spatialLayers[last_active_layer].active) {
+ break;
+ }
+ }
+ active.num = last_active_layer - active.first;
+
+ return active;
+}
+
+std::vector<DataRate> AdjustAndVerify(
+ const VideoCodec& codec,
+ size_t first_active_layer,
+ const std::vector<DataRate>& spatial_layer_rates) {
+ std::vector<DataRate> adjusted_spatial_layer_rates;
+ // Keep track of the rate that couldn't be applied to the previous layer due
+ // to its max bitrate constraint, and try to pass it forward to the next one.
+ DataRate excess_rate = DataRate::Zero();
+ for (size_t sl_idx = 0; sl_idx < spatial_layer_rates.size(); ++sl_idx) {
+ DataRate min_rate = DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + sl_idx].minBitrate);
+ DataRate max_rate = DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + sl_idx].maxBitrate);
+
+ DataRate layer_rate = spatial_layer_rates[sl_idx] + excess_rate;
+ if (layer_rate < min_rate) {
+ // Not enough rate to reach min bitrate for desired number of layers,
+ // abort allocation.
+ if (spatial_layer_rates.size() == 1) {
+ return spatial_layer_rates;
+ }
+ return adjusted_spatial_layer_rates;
+ }
+
+ if (layer_rate <= max_rate) {
+ excess_rate = DataRate::Zero();
+ adjusted_spatial_layer_rates.push_back(layer_rate);
+ } else {
+ excess_rate = layer_rate - max_rate;
+ adjusted_spatial_layer_rates.push_back(max_rate);
+ }
+ }
+
+ return adjusted_spatial_layer_rates;
+}
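+
+// Editorial example (hypothetical kbps values): with per-layer min/max of
+// 100/400 for layer 0 and 200/600 for layer 1, an input of {500, 300} is
+// adjusted as follows: layer 0 is capped at 400 and its 100 kbps excess is
+// carried forward, so layer 1 receives 300 + 100 = 400:
+//
+//   std::vector<DataRate> adjusted = AdjustAndVerify(
+//       codec, /*first_active_layer=*/0,
+//       {DataRate::KilobitsPerSec(500), DataRate::KilobitsPerSec(300)});
+//   // adjusted == {400 kbps, 400 kbps}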
+
+static std::vector<DataRate> SplitBitrate(size_t num_layers,
+ DataRate total_bitrate,
+ float rate_scaling_factor) {
+ std::vector<DataRate> bitrates;
+
+ double denominator = 0.0;
+ for (size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
+ denominator += std::pow(rate_scaling_factor, layer_idx);
+ }
+
+ double numerator = std::pow(rate_scaling_factor, num_layers - 1);
+ for (size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
+ bitrates.push_back(numerator * total_bitrate / denominator);
+ numerator /= rate_scaling_factor;
+ }
+
+ const DataRate sum =
+ std::accumulate(bitrates.begin(), bitrates.end(), DataRate::Zero());
+
+ // Keep the sum of the split bitrates equal to the total bitrate by adding
+ // or subtracting the bits lost to rounding to or from the last layer.
+ if (total_bitrate > sum) {
+ bitrates.back() += total_bitrate - sum;
+ } else if (total_bitrate < sum) {
+ bitrates.back() -= sum - total_bitrate;
+ }
+
+ return bitrates;
+}
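+
+// Editorial example (hypothetical numbers): with three layers and the 0.55
+// scaling factor, shares are proportional to 0.55^2 : 0.55 : 1, i.e. roughly
+// 16% / 30% / 54% from the lowest to the highest layer:
+//
+//   std::vector<DataRate> rates =
+//       SplitBitrate(3, DataRate::KilobitsPerSec(1000), 0.55f);
+//   // rates ~= {163, 297, 540} kbps; rounding leftovers go to rates[2].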
+
+// Returns the minimum bitrate needed for `num_active_layers` spatial layers to
+// become active using the configuration specified by `codec`.
+DataRate FindLayerTogglingThreshold(const VideoCodec& codec,
+ size_t first_active_layer,
+ size_t num_active_layers) {
+ if (num_active_layers == 1) {
+ return DataRate::KilobitsPerSec(codec.spatialLayers[0].minBitrate);
+ }
+
+ if (codec.mode == VideoCodecMode::kRealtimeVideo) {
+ DataRate lower_bound = DataRate::Zero();
+ DataRate upper_bound = DataRate::Zero();
+ if (num_active_layers > 1) {
+ for (size_t i = 0; i < num_active_layers - 1; ++i) {
+ lower_bound += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + i].minBitrate);
+ upper_bound += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + i].maxBitrate);
+ }
+ }
+ upper_bound += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + num_active_layers - 1]
+ .minBitrate);
+
+ // Binary-search until the lower bound is the highest bitrate at which only
+ // `num_active_layers` - 1 layers can be enabled, and the upper bound is the
+ // lowest bitrate at which all `num_active_layers` layers can be enabled.
+ while (upper_bound - lower_bound > DataRate::BitsPerSec(1)) {
+ DataRate try_rate = (lower_bound + upper_bound) / 2;
+ if (AdjustAndVerify(codec, first_active_layer,
+ SplitBitrate(num_active_layers, try_rate,
+ kSpatialLayeringRateScalingFactor))
+ .size() == num_active_layers) {
+ upper_bound = try_rate;
+ } else {
+ lower_bound = try_rate;
+ }
+ }
+ return upper_bound;
+ } else {
+ DataRate toggling_rate = DataRate::Zero();
+ for (size_t i = 0; i < num_active_layers - 1; ++i) {
+ toggling_rate += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + i].targetBitrate);
+ }
+ toggling_rate += DataRate::KilobitsPerSec(
+ codec.spatialLayers[first_active_layer + num_active_layers - 1]
+ .minBitrate);
+ return toggling_rate;
+ }
+}
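+
+// Editorial note: the thresholds produced here grow monotonically with
+// `num_active_layers` (GetLayerStartBitrates() DCHECKs this). For the
+// 1280x720 three-layer config used in the unit tests, the computed
+// thresholds come out at roughly 299 kbps for two layers and 891 kbps for
+// three.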
+
+} // namespace
+
+SvcRateAllocator::NumLayers SvcRateAllocator::GetNumLayers(
+ const VideoCodec& codec) {
+ NumLayers layers;
+ if (absl::optional<ScalabilityMode> scalability_mode =
+ codec.GetScalabilityMode();
+ scalability_mode.has_value()) {
+ if (auto structure = CreateScalabilityStructure(*scalability_mode)) {
+ ScalableVideoController::StreamLayersConfig config =
+ structure->StreamConfig();
+ layers.spatial = config.num_spatial_layers;
+ layers.temporal = config.num_temporal_layers;
+ return layers;
+ }
+ }
+ if (codec.codecType == kVideoCodecVP9) {
+ layers.spatial = codec.VP9().numberOfSpatialLayers;
+ layers.temporal = codec.VP9().numberOfTemporalLayers;
+ return layers;
+ }
+ layers.spatial = 1;
+ layers.temporal = 1;
+ return layers;
+}
+
+SvcRateAllocator::SvcRateAllocator(const VideoCodec& codec)
+ : codec_(codec),
+ num_layers_(GetNumLayers(codec)),
+ experiment_settings_(StableTargetRateExperiment::ParseFromFieldTrials()),
+ cumulative_layer_start_bitrates_(GetLayerStartBitrates(codec)),
+ last_active_layer_count_(0) {
+ RTC_DCHECK_GT(num_layers_.spatial, 0);
+ RTC_DCHECK_LE(num_layers_.spatial, kMaxSpatialLayers);
+ RTC_DCHECK_GT(num_layers_.temporal, 0);
+ RTC_DCHECK_LE(num_layers_.temporal, 3);
+ for (size_t layer_idx = 0; layer_idx < num_layers_.spatial; ++layer_idx) {
+ // Verify min <= target <= max.
+ if (codec.spatialLayers[layer_idx].active) {
+ RTC_DCHECK_GT(codec.spatialLayers[layer_idx].maxBitrate, 0);
+ RTC_DCHECK_GE(codec.spatialLayers[layer_idx].maxBitrate,
+ codec.spatialLayers[layer_idx].minBitrate);
+ RTC_DCHECK_GE(codec.spatialLayers[layer_idx].targetBitrate,
+ codec.spatialLayers[layer_idx].minBitrate);
+ RTC_DCHECK_GE(codec.spatialLayers[layer_idx].maxBitrate,
+ codec.spatialLayers[layer_idx].targetBitrate);
+ }
+ }
+}
+
+VideoBitrateAllocation SvcRateAllocator::Allocate(
+ VideoBitrateAllocationParameters parameters) {
+ DataRate total_bitrate = parameters.total_bitrate;
+ if (codec_.maxBitrate != 0) {
+ total_bitrate =
+ std::min(total_bitrate, DataRate::KilobitsPerSec(codec_.maxBitrate));
+ }
+
+ if (codec_.spatialLayers[0].targetBitrate == 0) {
+ // Delegate rate distribution to encoder wrapper if bitrate thresholds
+ // are not set.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, total_bitrate.bps());
+ return bitrate_allocation;
+ }
+
+ const ActiveSpatialLayers active_layers =
+ GetActiveSpatialLayers(codec_, num_layers_.spatial);
+ size_t num_spatial_layers = active_layers.num;
+
+ if (num_spatial_layers == 0) {
+ return VideoBitrateAllocation(); // All layers are deactivated.
+ }
+
+ // Figure out how many spatial layers should be active.
+ if (experiment_settings_.IsEnabled() &&
+ parameters.stable_bitrate > DataRate::Zero()) {
+ double hysteresis_factor;
+ if (codec_.mode == VideoCodecMode::kScreensharing) {
+ hysteresis_factor = experiment_settings_.GetScreenshareHysteresisFactor();
+ } else {
+ hysteresis_factor = experiment_settings_.GetVideoHysteresisFactor();
+ }
+
+ DataRate stable_rate =
+ std::min(parameters.total_bitrate, parameters.stable_bitrate);
+ // First check if bitrate has grown large enough to enable new layers.
+ size_t num_enabled_with_hysteresis =
+ FindNumEnabledLayers(stable_rate / hysteresis_factor);
+ if (num_enabled_with_hysteresis >= last_active_layer_count_) {
+ num_spatial_layers = num_enabled_with_hysteresis;
+ } else {
+ // We could not enable new layers, check if any should be disabled.
+ num_spatial_layers =
+ std::min(last_active_layer_count_, FindNumEnabledLayers(stable_rate));
+ }
+ } else {
+ num_spatial_layers = FindNumEnabledLayers(parameters.total_bitrate);
+ }
+ last_active_layer_count_ = num_spatial_layers;
+
+ VideoBitrateAllocation allocation;
+ if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
+ allocation = GetAllocationNormalVideo(total_bitrate, active_layers.first,
+ num_spatial_layers);
+ } else {
+ allocation = GetAllocationScreenSharing(total_bitrate, active_layers.first,
+ num_spatial_layers);
+ }
+ allocation.set_bw_limited(num_spatial_layers < active_layers.num);
+ return allocation;
+}
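+
+// Editorial example (hypothetical factor): with a hysteresis factor of 1.2,
+// a currently disabled layer whose start threshold is 300 kbps is enabled
+// only once the stable rate reaches 360 kbps, while an already enabled layer
+// stays on all the way down to 300 kbps. This avoids toggling layers on
+// small rate fluctuations around the threshold.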
+
+VideoBitrateAllocation SvcRateAllocator::GetAllocationNormalVideo(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const {
+ std::vector<DataRate> spatial_layer_rates;
+ if (num_spatial_layers == 0) {
+ // Not enough rate for even the base layer. Force allocation at the total
+ // bitrate anyway.
+ num_spatial_layers = 1;
+ spatial_layer_rates.push_back(total_bitrate);
+ } else {
+ spatial_layer_rates =
+ AdjustAndVerify(codec_, first_active_layer,
+ SplitBitrate(num_spatial_layers, total_bitrate,
+ kSpatialLayeringRateScalingFactor));
+ RTC_DCHECK_EQ(spatial_layer_rates.size(), num_spatial_layers);
+ }
+
+ VideoBitrateAllocation bitrate_allocation;
+
+ for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) {
+ std::vector<DataRate> temporal_layer_rates =
+ SplitBitrate(num_layers_.temporal, spatial_layer_rates[sl_idx],
+ kTemporalLayeringRateScalingFactor);
+
+ // Distribute rate across temporal layers. Allocate more bits to lower
+ // layers since they are used for prediction by higher layers and their
+ // reference frames are farther apart.
+ if (num_layers_.temporal == 1) {
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0,
+ temporal_layer_rates[0].bps());
+ } else if (num_layers_.temporal == 2) {
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0,
+ temporal_layer_rates[1].bps());
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 1,
+ temporal_layer_rates[0].bps());
+ } else {
+ RTC_CHECK_EQ(num_layers_.temporal, 3);
+ // With three temporal layers, the high layer has two frames and the middle
+ // layer has one frame within a GOP (in between two consecutive low-layer
+ // frames). Thus the high layer requires more bits (comparing the pure
+ // bitrate of the layer, excluding the bitrate of the base layers) to keep
+ // quality on par with the lower layers.
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0,
+ temporal_layer_rates[2].bps());
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 1,
+ temporal_layer_rates[0].bps());
+ bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 2,
+ temporal_layer_rates[1].bps());
+ }
+ }
+
+ return bitrate_allocation;
+}
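+
+// Editorial note (using the 0.55 temporal split above): with three temporal
+// layers SplitBitrate() yields shares of roughly 16% / 30% / 54%, and the
+// mapping above assigns ~54% to TL0, ~16% to TL1 and ~30% to TL2; TL2
+// outweighs TL1 because it carries two frames per GOP.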
+
+// Bitrate is allocated so that the highest enabled layer will get between its
+// min and max bitrate, and all other enabled layers will get exactly their
+// target bitrate.
+VideoBitrateAllocation SvcRateAllocator::GetAllocationScreenSharing(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const {
+ VideoBitrateAllocation bitrate_allocation;
+
+ if (num_spatial_layers == 0 ||
+ total_bitrate <
+ DataRate::KilobitsPerSec(
+ codec_.spatialLayers[first_active_layer].minBitrate)) {
+ // Always enable at least one layer.
+ bitrate_allocation.SetBitrate(first_active_layer, 0, total_bitrate.bps());
+ return bitrate_allocation;
+ }
+
+ DataRate allocated_rate = DataRate::Zero();
+ DataRate top_layer_rate = DataRate::Zero();
+ size_t sl_idx;
+ for (sl_idx = first_active_layer;
+ sl_idx < first_active_layer + num_spatial_layers; ++sl_idx) {
+ const DataRate min_rate =
+ DataRate::KilobitsPerSec(codec_.spatialLayers[sl_idx].minBitrate);
+ const DataRate target_rate =
+ DataRate::KilobitsPerSec(codec_.spatialLayers[sl_idx].targetBitrate);
+
+ if (allocated_rate + min_rate > total_bitrate) {
+ // Use stable rate to determine if layer should be enabled.
+ break;
+ }
+
+ top_layer_rate = std::min(target_rate, total_bitrate - allocated_rate);
+ bitrate_allocation.SetBitrate(sl_idx, 0, top_layer_rate.bps());
+ allocated_rate += top_layer_rate;
+ }
+
+ if (sl_idx > 0 && total_bitrate - allocated_rate > DataRate::Zero()) {
+ // Add leftover to the last allocated layer.
+ top_layer_rate = std::min(
+ top_layer_rate + (total_bitrate - allocated_rate),
+ DataRate::KilobitsPerSec(codec_.spatialLayers[sl_idx - 1].maxBitrate));
+ bitrate_allocation.SetBitrate(sl_idx - 1, 0, top_layer_rate.bps());
+ }
+
+ return bitrate_allocation;
+}
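+
+// Editorial example (hypothetical kbps values): three layers with
+// min/target/max of 100/150/200, 200/300/600 and 400/500/900, at a total of
+// 800 kbps. Layers 0 and 1 get their targets (150 and 300); layer 2 cannot
+// meet its 400 kbps minimum, so the 350 kbps leftover is folded back into
+// layer 1, capped at its 600 kbps max. Final allocation: 150 / 600 / 0.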
+
+size_t SvcRateAllocator::FindNumEnabledLayers(DataRate target_rate) const {
+ if (cumulative_layer_start_bitrates_.empty()) {
+ return 0;
+ }
+
+ size_t num_enabled_layers = 0;
+ for (DataRate start_rate : cumulative_layer_start_bitrates_) {
+ // First layer is always enabled.
+ if (num_enabled_layers == 0 || start_rate <= target_rate) {
+ ++num_enabled_layers;
+ } else {
+ break;
+ }
+ }
+
+ return num_enabled_layers;
+}
+
+DataRate SvcRateAllocator::GetMaxBitrate(const VideoCodec& codec) {
+ const NumLayers num_layers = GetNumLayers(codec);
+ const ActiveSpatialLayers active_layers =
+ GetActiveSpatialLayers(codec, num_layers.spatial);
+
+ DataRate max_bitrate = DataRate::Zero();
+ for (size_t sl_idx = 0; sl_idx < active_layers.num; ++sl_idx) {
+ max_bitrate += DataRate::KilobitsPerSec(
+ codec.spatialLayers[active_layers.first + sl_idx].maxBitrate);
+ }
+
+ if (codec.maxBitrate != 0) {
+ max_bitrate =
+ std::min(max_bitrate, DataRate::KilobitsPerSec(codec.maxBitrate));
+ }
+
+ return max_bitrate;
+}
+
+DataRate SvcRateAllocator::GetPaddingBitrate(const VideoCodec& codec) {
+ auto start_bitrate = GetLayerStartBitrates(codec);
+ if (start_bitrate.empty()) {
+ return DataRate::Zero(); // All layers are deactivated.
+ }
+
+ return start_bitrate.back();
+}
+
+absl::InlinedVector<DataRate, kMaxSpatialLayers>
+SvcRateAllocator::GetLayerStartBitrates(const VideoCodec& codec) {
+ absl::InlinedVector<DataRate, kMaxSpatialLayers> start_bitrates;
+ const NumLayers num_layers = GetNumLayers(codec);
+ const ActiveSpatialLayers active_layers =
+ GetActiveSpatialLayers(codec, num_layers.spatial);
+ DataRate last_rate = DataRate::Zero();
+ for (size_t i = 1; i <= active_layers.num; ++i) {
+ DataRate layer_toggling_rate =
+ FindLayerTogglingThreshold(codec, active_layers.first, i);
+ start_bitrates.push_back(layer_toggling_rate);
+ RTC_DCHECK_LE(last_rate, layer_toggling_rate);
+ last_rate = layer_toggling_rate;
+ }
+ return start_bitrates;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h
new file mode 100644
index 0000000000..bd75fca284
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_
+#define MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/container/inlined_vector.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/experiments/stable_target_rate_experiment.h"
+
+namespace webrtc {
+
+class SvcRateAllocator : public VideoBitrateAllocator {
+ public:
+ explicit SvcRateAllocator(const VideoCodec& codec);
+
+ VideoBitrateAllocation Allocate(
+ VideoBitrateAllocationParameters parameters) override;
+
+ static DataRate GetMaxBitrate(const VideoCodec& codec);
+ static DataRate GetPaddingBitrate(const VideoCodec& codec);
+ static absl::InlinedVector<DataRate, kMaxSpatialLayers> GetLayerStartBitrates(
+ const VideoCodec& codec);
+
+ private:
+ struct NumLayers {
+ size_t spatial = 1;
+ size_t temporal = 1;
+ };
+
+ static NumLayers GetNumLayers(const VideoCodec& codec);
+ VideoBitrateAllocation GetAllocationNormalVideo(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const;
+
+ VideoBitrateAllocation GetAllocationScreenSharing(
+ DataRate total_bitrate,
+ size_t first_active_layer,
+ size_t num_spatial_layers) const;
+
+ // Returns the number of layers that are active and have enough bitrate to
+ // actually be enabled.
+ size_t FindNumEnabledLayers(DataRate target_rate) const;
+
+ const VideoCodec codec_;
+ const NumLayers num_layers_;
+ const StableTargetRateExperiment experiment_settings_;
+ const absl::InlinedVector<DataRate, kMaxSpatialLayers>
+ cumulative_layer_start_bitrates_;
+ size_t last_active_layer_count_;
+};
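+
+// Editorial sketch (illustrative only): typical use is to construct the
+// allocator from a configured VideoCodec and query it on each rate update:
+//
+//   SvcRateAllocator allocator(codec);
+//   VideoBitrateAllocation allocation = allocator.Allocate(
+//       VideoBitrateAllocationParameters(DataRate::KilobitsPerSec(1000), 30));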
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build
new file mode 100644
index 0000000000..5d7d952a69
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("svc_rate_allocator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc
new file mode 100644
index 0000000000..b3a365d722
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/svc/svc_rate_allocator_unittest.cc
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "rtc_base/checks.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+static VideoCodec Configure(size_t width,
+ size_t height,
+ size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool is_screen_sharing) {
+ VideoCodec codec;
+ codec.width = width;
+ codec.height = height;
+ codec.codecType = kVideoCodecVP9;
+ codec.mode = is_screen_sharing ? VideoCodecMode::kScreensharing
+ : VideoCodecMode::kRealtimeVideo;
+
+ std::vector<SpatialLayer> spatial_layers =
+ GetSvcConfig(width, height, 30, /*first_active_layer=*/0,
+ num_spatial_layers, num_temporal_layers, is_screen_sharing);
+ RTC_CHECK_LE(spatial_layers.size(), kMaxSpatialLayers);
+
+ codec.VP9()->numberOfSpatialLayers =
+ std::min<unsigned char>(num_spatial_layers, spatial_layers.size());
+ codec.VP9()->numberOfTemporalLayers = std::min<unsigned char>(
+ num_temporal_layers, spatial_layers.back().numberOfTemporalLayers);
+
+ for (size_t sl_idx = 0; sl_idx < spatial_layers.size(); ++sl_idx) {
+ codec.spatialLayers[sl_idx] = spatial_layers[sl_idx];
+ }
+
+ return codec;
+}
+
+} // namespace
+
+TEST(SvcRateAllocatorTest, SingleLayerFor320x180Input) {
+ VideoCodec codec = Configure(320, 180, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1000 * 1000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0u);
+}
+
+TEST(SvcRateAllocatorTest, TwoLayersFor640x360Input) {
+ VideoCodec codec = Configure(640, 360, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1000 * 1000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, ThreeLayersFor1280x720Input) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1000 * 1000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest,
+ BaseLayerNonZeroBitrateEvenIfTotalIsLessThanMinimum) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(layers[0].minBitrate * 1000 / 2, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_LT(allocation.GetSpatialLayerSum(0), layers[0].minBitrate * 1000);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0u);
+}
+
+TEST(SvcRateAllocatorTest, Disable640x360Layer) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ size_t min_bitrate_for_640x360_layer_kbps =
+ layers[0].minBitrate + layers[1].minBitrate;
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(
+ min_bitrate_for_640x360_layer_kbps * 1000 - 1, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0u);
+}
+
+TEST(SvcRateAllocatorTest, Disable1280x720Layer) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ size_t min_bitrate_for_1280x720_layer_kbps =
+ layers[0].minBitrate + layers[1].minBitrate + layers[2].minBitrate;
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(
+ min_bitrate_for_1280x720_layer_kbps * 1000 - 1, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, BitrateIsCapped) {
+ VideoCodec codec = Configure(1280, 720, 3, 3, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ const uint32_t link_mbps = 100;
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(link_mbps * 1000000, 30));
+
+ EXPECT_EQ(allocation.get_sum_kbps(),
+ layers[0].maxBitrate + layers[1].maxBitrate + layers[2].maxBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0) / 1000, layers[0].maxBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1) / 1000, layers[1].maxBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2) / 1000, layers[2].maxBitrate);
+}
+
+TEST(SvcRateAllocatorTest, MinBitrateToGetQualityLayer) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, true);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ const SpatialLayer* layers = codec.spatialLayers;
+
+ EXPECT_LE(codec.VP9()->numberOfSpatialLayers, 3U);
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(layers[0].minBitrate * 1000, 30));
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0) / 1000, layers[0].minBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0UL);
+
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ (layers[0].targetBitrate + layers[1].minBitrate) * 1000, 30));
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0) / 1000, layers[0].targetBitrate);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1) / 1000, layers[1].minBitrate);
+}
+
+TEST(SvcRateAllocatorTest, DeactivateHigherLayers) {
+ for (int deactivated_idx = 2; deactivated_idx >= 0; --deactivated_idx) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ EXPECT_LE(codec.VP9()->numberOfSpatialLayers, 3U);
+
+ for (int i = deactivated_idx; i < 3; ++i)
+ codec.spatialLayers[i].active = false;
+
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(10 * 1000 * 1000, 30));
+
+ // Ensure layers spatial_idx < deactivated_idx are activated.
+ for (int spatial_idx = 0; spatial_idx < deactivated_idx; ++spatial_idx) {
+ EXPECT_GT(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+
+ // Ensure layers spatial_idx >= deactivated_idx are deactivated.
+ for (int spatial_idx = deactivated_idx; spatial_idx < 3; ++spatial_idx) {
+ EXPECT_EQ(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+ }
+}
+
+TEST(SvcRateAllocatorTest, DeactivateLowerLayers) {
+ for (int deactivated_idx = 0; deactivated_idx < 3; ++deactivated_idx) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ EXPECT_LE(codec.VP9()->numberOfSpatialLayers, 3U);
+
+ for (int i = deactivated_idx; i >= 0; --i)
+ codec.spatialLayers[i].active = false;
+
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ VideoBitrateAllocation allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(10 * 1000 * 1000, 30));
+
+ // Ensure layers spatial_idx <= deactivated_idx are deactivated.
+ for (int spatial_idx = 0; spatial_idx <= deactivated_idx; ++spatial_idx) {
+ EXPECT_EQ(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+
+ // Ensure layers spatial_idx > deactivated_idx are activated.
+ for (int spatial_idx = deactivated_idx + 1; spatial_idx < 3;
+ ++spatial_idx) {
+ EXPECT_GT(allocation.GetSpatialLayerSum(spatial_idx), 0UL);
+ }
+ }
+}
+
+TEST(SvcRateAllocatorTest, SignalsBwLimited) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ // Rough estimate calculated by hand.
+ uint32_t min_to_enable_all = 900000;
+
+ EXPECT_TRUE(
+ allocator
+ .Allocate(VideoBitrateAllocationParameters(min_to_enable_all / 2, 30))
+ .is_bw_limited());
+
+ EXPECT_FALSE(
+ allocator
+ .Allocate(VideoBitrateAllocationParameters(min_to_enable_all, 30))
+ .is_bw_limited());
+}
+
+TEST(SvcRateAllocatorTest, NoPaddingIfAllLayersAreDeactivated) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ EXPECT_EQ(codec.VP9()->numberOfSpatialLayers, 3U);
+ // Deactivation of base layer deactivates all layers.
+ codec.spatialLayers[0].active = false;
+ codec.spatialLayers[1].active = false;
+ codec.spatialLayers[2].active = false;
+ DataRate padding_rate = SvcRateAllocator::GetPaddingBitrate(codec);
+ EXPECT_EQ(padding_rate, DataRate::Zero());
+}
+
+TEST(SvcRateAllocatorTest, FindLayerTogglingThreshold) {
+ // Let's unit test a utility method of the unit test...
+
+ // Predetermined constants indicating the min bitrate needed for two and three
+ // layers to be enabled respectively, using the config from Configure() with
+ // 1280x720 resolution and three spatial layers.
+ const DataRate kTwoLayerMinRate = DataRate::BitsPerSec(299150);
+ const DataRate kThreeLayerMinRate = DataRate::BitsPerSec(891052);
+
+ VideoCodec codec = Configure(1280, 720, 3, 1, false);
+ absl::InlinedVector<DataRate, kMaxSpatialLayers> layer_start_bitrates =
+ SvcRateAllocator::GetLayerStartBitrates(codec);
+ ASSERT_EQ(layer_start_bitrates.size(), 3u);
+ EXPECT_EQ(layer_start_bitrates[1], kTwoLayerMinRate);
+ EXPECT_EQ(layer_start_bitrates[2], kThreeLayerMinRate);
+}
+
+TEST(SvcRateAllocatorTest, SupportsAv1) {
+ VideoCodec codec;
+ codec.width = 640;
+ codec.height = 360;
+ codec.codecType = kVideoCodecAV1;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+ codec.spatialLayers[0].active = true;
+ codec.spatialLayers[0].minBitrate = 30;
+ codec.spatialLayers[0].targetBitrate = 51;
+ codec.spatialLayers[0].maxBitrate = 73;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[1].minBitrate = 49;
+ codec.spatialLayers[1].targetBitrate = 64;
+ codec.spatialLayers[1].maxBitrate = 97;
+ codec.spatialLayers[2].active = true;
+ codec.spatialLayers[2].minBitrate = 193;
+ codec.spatialLayers[2].targetBitrate = 305;
+ codec.spatialLayers[2].maxBitrate = 418;
+
+ SvcRateAllocator allocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30));
+
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, SupportsAv1WithSkippedLayer) {
+ VideoCodec codec;
+ codec.width = 640;
+ codec.height = 360;
+ codec.codecType = kVideoCodecAV1;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+ codec.spatialLayers[0].active = false;
+ codec.spatialLayers[0].minBitrate = 30;
+ codec.spatialLayers[0].targetBitrate = 51;
+ codec.spatialLayers[0].maxBitrate = 73;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[1].minBitrate = 49;
+ codec.spatialLayers[1].targetBitrate = 64;
+ codec.spatialLayers[1].maxBitrate = 97;
+ codec.spatialLayers[2].active = true;
+ codec.spatialLayers[2].minBitrate = 193;
+ codec.spatialLayers[2].targetBitrate = 305;
+ codec.spatialLayers[2].maxBitrate = 418;
+
+ SvcRateAllocator allocator(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30));
+
+ EXPECT_EQ(allocation.GetSpatialLayerSum(0), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+TEST(SvcRateAllocatorTest, UsesScalabilityModeToGetNumberOfLayers) {
+ VideoCodec codec;
+ codec.width = 640;
+ codec.height = 360;
+ codec.codecType = kVideoCodecAV1;
+ codec.SetScalabilityMode(ScalabilityMode::kL2T2);
+ codec.spatialLayers[0].active = true;
+ codec.spatialLayers[0].minBitrate = 30;
+ codec.spatialLayers[0].targetBitrate = 51;
+ codec.spatialLayers[0].maxBitrate = 73;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[1].minBitrate = 49;
+ codec.spatialLayers[1].targetBitrate = 64;
+ codec.spatialLayers[1].maxBitrate = 97;
+ codec.spatialLayers[2].active = true;
+ codec.spatialLayers[2].minBitrate = 193;
+ codec.spatialLayers[2].targetBitrate = 305;
+ codec.spatialLayers[2].maxBitrate = 418;
+
+ SvcRateAllocator allocator(codec);
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30));
+
+ // Expect bitrates for 2 temporal layers.
+ EXPECT_TRUE(allocation.HasBitrate(1, /*temporal_index=*/0));
+ EXPECT_TRUE(allocation.HasBitrate(1, /*temporal_index=*/1));
+ EXPECT_FALSE(allocation.HasBitrate(1, /*temporal_index=*/2));
+
+  // Expect codec.spatialLayers[2].active to be ignored because the
+  // scalability mode uses just 2 spatial layers.
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u);
+}
+
+class SvcRateAllocatorTestParametrizedContentType
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<bool> {
+ public:
+ SvcRateAllocatorTestParametrizedContentType()
+ : is_screen_sharing_(GetParam()) {}
+
+ const bool is_screen_sharing_;
+};
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType, MaxBitrate) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ EXPECT_EQ(SvcRateAllocator::GetMaxBitrate(codec),
+ DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate +
+ codec.spatialLayers[1].maxBitrate +
+ codec.spatialLayers[2].maxBitrate));
+
+ // Deactivate middle layer. This causes deactivation of top layer as well.
+ codec.spatialLayers[1].active = false;
+ EXPECT_EQ(SvcRateAllocator::GetMaxBitrate(codec),
+ DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate));
+}
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType, PaddingBitrate) {
+ VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ DataRate padding_bitrate = SvcRateAllocator::GetPaddingBitrate(codec);
+
+ VideoBitrateAllocation allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(padding_bitrate, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(2), 0UL);
+
+ // Allocate 90% of padding bitrate. Top layer should be disabled.
+ allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(9 * padding_bitrate / 10, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0UL);
+
+ // Deactivate top layer.
+ codec.spatialLayers[2].active = false;
+
+ padding_bitrate = SvcRateAllocator::GetPaddingBitrate(codec);
+ allocation =
+ allocator.Allocate(VideoBitrateAllocationParameters(padding_bitrate, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_GT(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0UL);
+
+ allocation = allocator.Allocate(
+ VideoBitrateAllocationParameters(9 * padding_bitrate / 10, 30));
+ EXPECT_GT(allocation.GetSpatialLayerSum(0), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(1), 0UL);
+ EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0UL);
+
+ // Deactivate all layers.
+ codec.spatialLayers[0].active = false;
+ codec.spatialLayers[1].active = false;
+ codec.spatialLayers[2].active = false;
+
+ padding_bitrate = SvcRateAllocator::GetPaddingBitrate(codec);
+ // No padding expected.
+ EXPECT_EQ(DataRate::Zero(), padding_bitrate);
+}
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType, StableBitrate) {
+ ScopedFieldTrials field_trial(
+ "WebRTC-StableTargetRate/enabled:true,video_hysteresis_factor:1.0,"
+ "screenshare_hysteresis_factor:1.0/");
+
+ const VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ const auto start_rates = SvcRateAllocator::GetLayerStartBitrates(codec);
+ const DataRate min_rate_two_layers = start_rates[1];
+ const DataRate min_rate_three_layers = start_rates[2];
+
+ const DataRate max_rate_one_layer =
+ DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate);
+ const DataRate max_rate_two_layers =
+ is_screen_sharing_
+ ? DataRate::KilobitsPerSec(codec.spatialLayers[0].targetBitrate +
+ codec.spatialLayers[1].maxBitrate)
+ : DataRate::KilobitsPerSec(codec.spatialLayers[0].maxBitrate +
+ codec.spatialLayers[1].maxBitrate);
+
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+
+ // Two layers, stable and target equal.
+ auto allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_two_layers,
+ /*stable_bitrate=*/min_rate_two_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_EQ(allocation.get_sum_bps(), min_rate_two_layers.bps());
+
+ // Two layers, stable bitrate too low for two layers.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_two_layers,
+ /*stable_bitrate=*/min_rate_two_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_EQ(DataRate::BitsPerSec(allocation.get_sum_bps()),
+ std::min(min_rate_two_layers - DataRate::BitsPerSec(1),
+ max_rate_one_layer));
+
+ // Three layers, stable and target equal.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_three_layers,
+ /*stable_bitrate=*/min_rate_three_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(2));
+ EXPECT_EQ(allocation.get_sum_bps(), min_rate_three_layers.bps());
+
+ // Three layers, stable bitrate too low for three layers.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/min_rate_three_layers,
+ /*stable_bitrate=*/min_rate_three_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+ EXPECT_EQ(DataRate::BitsPerSec(allocation.get_sum_bps()),
+ std::min(min_rate_three_layers - DataRate::BitsPerSec(1),
+ max_rate_two_layers));
+}
+
+TEST_P(SvcRateAllocatorTestParametrizedContentType,
+ StableBitrateWithHysteresis) {
+ const VideoCodec codec = Configure(1280, 720, 3, 1, is_screen_sharing_);
+ const auto start_rates = SvcRateAllocator::GetLayerStartBitrates(codec);
+ const DataRate min_rate_single_layer = start_rates[0];
+ const DataRate min_rate_two_layers = start_rates[1];
+ const DataRate min_rate_three_layers = start_rates[2];
+
+ ScopedFieldTrials field_trial(
+ "WebRTC-StableTargetRate/enabled:true,video_hysteresis_factor:1.1,"
+ "screenshare_hysteresis_factor:1.1/");
+ SvcRateAllocator allocator = SvcRateAllocator(codec);
+ // Always use max bitrate as target, verify only stable is used for layer
+ // count selection.
+ const DataRate max_bitrate = allocator.GetMaxBitrate(codec);
+
+ // Start with a single layer.
+ auto allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_single_layer, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Min bitrate not enough to enable second layer due to 10% hysteresis.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Add hysteresis, second layer should turn on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers * 1.1, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Remove hysteresis, second layer should stay on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Going below min for two layers, second layer should turn off again.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_two_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Min bitrate not enough to enable third layer due to 10% hysteresis.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+
+ // Add hysteresis, third layer should turn on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers * 1.1, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(2));
+
+ // Remove hysteresis, third layer should stay on.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers, /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(2));
+
+ // Going below min for three layers, third layer should turn off again.
+ allocation = allocator.Allocate(VideoBitrateAllocationParameters(
+ /*total_bitrate=*/max_bitrate,
+ /*stable_bitrate=*/min_rate_three_layers - DataRate::BitsPerSec(1),
+ /*fps=*/30.0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(0));
+ EXPECT_TRUE(allocation.IsSpatialLayerUsed(1));
+ EXPECT_FALSE(allocation.IsSpatialLayerUsed(2));
+}
+
+INSTANTIATE_TEST_SUITE_P(_,
+ SvcRateAllocatorTestParametrizedContentType,
+ ::testing::Bool());
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/test/stream_generator.cc b/third_party/libwebrtc/modules/video_coding/test/stream_generator.cc
new file mode 100644
index 0000000000..98a0cf1cdc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/test/stream_generator.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/test/stream_generator.h"
+
+#include <string.h>
+
+#include <list>
+
+#include "modules/video_coding/packet.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+StreamGenerator::StreamGenerator(uint16_t start_seq_num, int64_t current_time)
+ : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {}
+
+void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
+ packets_.clear();
+ sequence_number_ = start_seq_num;
+ start_time_ = current_time;
+ memset(packet_buffer_, 0, sizeof(packet_buffer_));
+}
+
+void StreamGenerator::GenerateFrame(VideoFrameType type,
+ int num_media_packets,
+ int num_empty_packets,
+ int64_t time_ms) {
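+  // RTP video uses a 90 kHz clock, so one millisecond is 90 ticks.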
+ uint32_t timestamp = 90 * (time_ms - start_time_);
+ for (int i = 0; i < num_media_packets; ++i) {
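+    // Split the frame evenly across the media packets, rounding to nearest.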
+ const int packet_size =
+ (kFrameSize + num_media_packets / 2) / num_media_packets;
+ bool marker_bit = (i == num_media_packets - 1);
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, packet_size,
+ (i == 0), marker_bit, type));
+ ++sequence_number_;
+ }
+ for (int i = 0; i < num_empty_packets; ++i) {
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
+ false, VideoFrameType::kEmptyFrame));
+ ++sequence_number_;
+ }
+}
+
+VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number,
+ uint32_t timestamp,
+ unsigned int size,
+ bool first_packet,
+ bool marker_bit,
+ VideoFrameType type) {
+ EXPECT_LT(size, kMaxPacketSize);
+ VCMPacket packet;
+ packet.seqNum = sequence_number;
+ packet.timestamp = timestamp;
+ packet.video_header.frame_type = type;
+ packet.video_header.is_first_packet_in_frame = first_packet;
+ packet.markerBit = marker_bit;
+ packet.sizeBytes = size;
+ packet.dataPtr = packet_buffer_;
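+  // Derive NALU completeness from the packet's position within the frame.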
+ if (packet.is_first_packet_in_frame())
+ packet.completeNALU = kNaluStart;
+ else if (packet.markerBit)
+ packet.completeNALU = kNaluEnd;
+ else
+ packet.completeNALU = kNaluIncomplete;
+ return packet;
+}
+
+bool StreamGenerator::PopPacket(VCMPacket* packet, int index) {
+ std::list<VCMPacket>::iterator it = GetPacketIterator(index);
+ if (it == packets_.end())
+ return false;
+ if (packet)
+ *packet = (*it);
+ packets_.erase(it);
+ return true;
+}
+
+bool StreamGenerator::GetPacket(VCMPacket* packet, int index) {
+ std::list<VCMPacket>::iterator it = GetPacketIterator(index);
+ if (it == packets_.end())
+ return false;
+ if (packet)
+ *packet = (*it);
+ return true;
+}
+
+bool StreamGenerator::NextPacket(VCMPacket* packet) {
+ if (packets_.empty())
+ return false;
+  if (packet != nullptr)
+ *packet = packets_.front();
+ packets_.pop_front();
+ return true;
+}
+
+void StreamGenerator::DropLastPacket() {
+ packets_.pop_back();
+}
+
+uint16_t StreamGenerator::NextSequenceNumber() const {
+ if (packets_.empty())
+ return sequence_number_;
+ return packets_.front().seqNum;
+}
+
+int StreamGenerator::PacketsRemaining() const {
+ return packets_.size();
+}
+
+std::list<VCMPacket>::iterator StreamGenerator::GetPacketIterator(int index) {
+ std::list<VCMPacket>::iterator it = packets_.begin();
+ for (int i = 0; i < index; ++i) {
+ ++it;
+ if (it == packets_.end())
+ break;
+ }
+ return it;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/test/stream_generator.h b/third_party/libwebrtc/modules/video_coding/test/stream_generator.h
new file mode 100644
index 0000000000..ddb23ebb76
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/test/stream_generator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
+#define MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
+
+#include <stdint.h>
+
+#include <list>
+
+#include "modules/video_coding/packet.h"
+
+namespace webrtc {
+
+const unsigned int kDefaultBitrateKbps = 1000;
+const unsigned int kDefaultFrameRate = 25;
+const unsigned int kMaxPacketSize = 1500;
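+// Nominal frame size implied by the defaults; the `kDefaultFrameRate * 4`
+// term rounds the integer division to nearest: (1000 + 100) / 200 = 5.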
+const unsigned int kFrameSize =
+ (kDefaultBitrateKbps + kDefaultFrameRate * 4) / (kDefaultFrameRate * 8);
+const int kDefaultFramePeriodMs = 1000 / kDefaultFrameRate;
+
+class StreamGenerator {
+ public:
+ StreamGenerator(uint16_t start_seq_num, int64_t current_time);
+
+ StreamGenerator(const StreamGenerator&) = delete;
+ StreamGenerator& operator=(const StreamGenerator&) = delete;
+
+ void Init(uint16_t start_seq_num, int64_t current_time);
+
+  // `time_ms` is the timestamp to stamp on the frame, in milliseconds.
+  // GenerateFrame translates `time_ms` into a 90 kHz RTP timestamp and puts
+  // it on the frame.
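+  //
+  // Illustrative call (values are arbitrary): a key frame split across three
+  // media packets, stamped with the current time:
+  //   generator.GenerateFrame(VideoFrameType::kVideoFrameKey,
+  //                           /*num_media_packets=*/3,
+  //                           /*num_empty_packets=*/0,
+  //                           /*time_ms=*/clock->TimeInMilliseconds());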
+ void GenerateFrame(VideoFrameType type,
+ int num_media_packets,
+ int num_empty_packets,
+ int64_t time_ms);
+
+ bool PopPacket(VCMPacket* packet, int index);
+ void DropLastPacket();
+
+ bool GetPacket(VCMPacket* packet, int index);
+
+ bool NextPacket(VCMPacket* packet);
+
+ uint16_t NextSequenceNumber() const;
+
+ int PacketsRemaining() const;
+
+ private:
+ VCMPacket GeneratePacket(uint16_t sequence_number,
+ uint32_t timestamp,
+ unsigned int size,
+ bool first_packet,
+ bool marker_bit,
+ VideoFrameType type);
+
+ std::list<VCMPacket>::iterator GetPacketIterator(int index);
+
+ std::list<VCMPacket> packets_;
+ uint16_t sequence_number_;
+ int64_t start_time_;
+ uint8_t packet_buffer_[kMaxPacketSize];
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/BUILD.gn b/third_party/libwebrtc/modules/video_coding/timing/BUILD.gn
new file mode 100644
index 0000000000..38348e6967
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/BUILD.gn
@@ -0,0 +1,153 @@
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("codec_timer") {
+ sources = [
+ "codec_timer.cc",
+ "codec_timer.h",
+ ]
+ deps = [ "../../../rtc_base:rtc_numerics" ]
+}
+
+rtc_library("inter_frame_delay") {
+ sources = [
+ "inter_frame_delay.cc",
+ "inter_frame_delay.h",
+ ]
+ deps = [
+ "../..:module_api_public",
+ "../../../api/units:frequency",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:rtc_numerics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("frame_delay_variation_kalman_filter") {
+ sources = [
+ "frame_delay_variation_kalman_filter.cc",
+ "frame_delay_variation_kalman_filter.h",
+ ]
+ deps = [
+ "../../../api/units:data_size",
+ "../../../api/units:time_delta",
+ ]
+ visibility = [
+ ":jitter_estimator",
+ ":timing_unittests",
+ ]
+}
+
+rtc_library("jitter_estimator") {
+ sources = [
+ "jitter_estimator.cc",
+ "jitter_estimator.h",
+ ]
+ deps = [
+ ":frame_delay_variation_kalman_filter",
+ ":rtt_filter",
+ "../../../api:field_trials_view",
+ "../../../api/units:data_size",
+ "../../../api/units:frequency",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:checks",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:rolling_accumulator",
+ "../../../rtc_base:rtc_numerics",
+ "../../../rtc_base:safe_conversions",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../system_wrappers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtt_filter") {
+ sources = [
+ "rtt_filter.cc",
+ "rtt_filter.h",
+ ]
+ deps = [ "../../../api/units:time_delta" ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ ]
+}
+
+rtc_library("timestamp_extrapolator") {
+ sources = [
+ "timestamp_extrapolator.cc",
+ "timestamp_extrapolator.h",
+ ]
+ deps = [
+ "../../../api/units:timestamp",
+ "../../../modules:module_api_public",
+ "../../../rtc_base:rtc_numerics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("timing_module") {
+ sources = [
+ "timing.cc",
+ "timing.h",
+ ]
+ deps = [
+ ":codec_timer",
+ ":timestamp_extrapolator",
+ "../../../api:field_trials_view",
+ "../../../api/units:time_delta",
+ "../../../api/video:video_frame",
+ "../../../api/video:video_rtp_headers",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:macromagic",
+ "../../../rtc_base:rtc_numerics",
+ "../../../rtc_base/experiments:field_trial_parser",
+ "../../../rtc_base/synchronization:mutex",
+ "../../../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("timing_unittests") {
+ testonly = true
+ sources = [
+ "frame_delay_variation_kalman_filter_unittest.cc",
+ "inter_frame_delay_unittest.cc",
+ "jitter_estimator_unittest.cc",
+ "rtt_filter_unittest.cc",
+ "timestamp_extrapolator_unittest.cc",
+ "timing_unittest.cc",
+ ]
+ deps = [
+ ":frame_delay_variation_kalman_filter",
+ ":inter_frame_delay",
+ ":jitter_estimator",
+ ":rtt_filter",
+ ":timestamp_extrapolator",
+ ":timing_module",
+ "../../../api:array_view",
+ "../../../api:field_trials",
+ "../../../api/units:data_size",
+ "../../../api/units:frequency",
+ "../../../api/units:time_delta",
+ "../../../api/units:timestamp",
+ "../../../rtc_base:histogram_percentile_counter",
+ "../../../rtc_base:timeutils",
+ "../../../system_wrappers:system_wrappers",
+ "../../../test:scoped_key_value_config",
+ "../../../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
diff --git a/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc
new file mode 100644
index 0000000000..f57d42d40a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/codec_timer.h"
+
+#include <cstdint>
+
+namespace webrtc {
+
+namespace {
+
+// The first kIgnoredSampleCount samples will be ignored.
+const int kIgnoredSampleCount = 5;
+// The percentile of the filter that RequiredDecodeTimeMs() returns.
+const float kPercentile = 0.95f;
+// The window size in ms.
+const int64_t kTimeLimitMs = 10000;
+
+} // anonymous namespace
+
+CodecTimer::CodecTimer() : ignored_sample_count_(0), filter_(kPercentile) {}
+CodecTimer::~CodecTimer() = default;
+
+void CodecTimer::AddTiming(int64_t decode_time_ms, int64_t now_ms) {
+ // Ignore the first `kIgnoredSampleCount` samples.
+ if (ignored_sample_count_ < kIgnoredSampleCount) {
+ ++ignored_sample_count_;
+ return;
+ }
+
+ // Insert new decode time value.
+ filter_.Insert(decode_time_ms);
+ history_.emplace(decode_time_ms, now_ms);
+
+ // Pop old decode time values.
+ while (!history_.empty() &&
+ now_ms - history_.front().sample_time_ms > kTimeLimitMs) {
+ filter_.Erase(history_.front().decode_time_ms);
+ history_.pop();
+ }
+}
+
+// Get the 95th percentile observed decode time within a time window.
+int64_t CodecTimer::RequiredDecodeTimeMs() const {
+ return filter_.GetPercentileValue();
+}
+
+CodecTimer::Sample::Sample(int64_t decode_time_ms, int64_t sample_time_ms)
+ : decode_time_ms(decode_time_ms), sample_time_ms(sample_time_ms) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/codec_timer.h b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.h
new file mode 100644
index 0000000000..9f12d82e98
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/codec_timer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_CODEC_TIMER_H_
+#define MODULES_VIDEO_CODING_TIMING_CODEC_TIMER_H_
+
+#include <queue>
+
+#include "rtc_base/numerics/percentile_filter.h"
+
+namespace webrtc {
+
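+// Tracks decode times and reports a high percentile over a sliding time
+// window. Illustrative use (caller-side names are assumptions):
+//   CodecTimer timer;
+//   timer.AddTiming(/*decode_time_ms=*/15, /*now_ms=*/rtc::TimeMillis());
+//   int64_t required_ms = timer.RequiredDecodeTimeMs();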
+class CodecTimer {
+ public:
+ CodecTimer();
+ ~CodecTimer();
+
+ // Add a new decode time to the filter.
+ void AddTiming(int64_t new_decode_time_ms, int64_t now_ms);
+
+ // Get the required decode time in ms. It is the 95th percentile observed
+ // decode time within a time window.
+ int64_t RequiredDecodeTimeMs() const;
+
+ private:
+ struct Sample {
+ Sample(int64_t decode_time_ms, int64_t sample_time_ms);
+ int64_t decode_time_ms;
+ int64_t sample_time_ms;
+ };
+
+ // The number of samples ignored so far.
+ int ignored_sample_count_;
+ // Queue with history of latest decode time values.
+ std::queue<Sample> history_;
+ // `filter_` contains the same values as `history_`, but in a data structure
+ // that allows efficient retrieval of the percentile value.
+ PercentileFilter<int64_t> filter_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_CODEC_TIMER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build
new file mode 100644
index 0000000000..59498feb91
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/codec_timer_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/codec_timer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("codec_timer_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.cc b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.cc
new file mode 100644
index 0000000000..ec6aa3445a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/frame_delay_variation_kalman_filter.h"
+
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+namespace {
+// TODO(brandtr): The value below corresponds to 8 Gbps. Is that reasonable?
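+// (1 / 1e-6 [1 / (bytes per ms)] = 1e6 bytes per ms = 8e9 bits per second.)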
+constexpr double kMaxBandwidth = 0.000001; // Unit: [1 / bytes per ms].
+} // namespace
+
+FrameDelayVariationKalmanFilter::FrameDelayVariationKalmanFilter() {
+ // TODO(brandtr): Is there a factor 1000 missing here?
+ estimate_[0] = 1 / (512e3 / 8); // Unit: [1 / bytes per ms]
+ estimate_[1] = 0; // Unit: [ms]
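+  // (The denominator 512e3 / 8 = 64000 bytes per ms corresponds to an
+  // initial bandwidth estimate of 512 Mbps; see the TODO above.)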
+
+ // Initial estimate covariance.
+ estimate_cov_[0][0] = 1e-4; // Unit: [(1 / bytes per ms)^2]
+ estimate_cov_[1][1] = 1e2; // Unit: [ms^2]
+ estimate_cov_[0][1] = estimate_cov_[1][0] = 0;
+
+ // Process noise covariance.
+ process_noise_cov_diag_[0] = 2.5e-10; // Unit: [(1 / bytes per ms)^2]
+ process_noise_cov_diag_[1] = 1e-10; // Unit: [ms^2]
+}
+
+void FrameDelayVariationKalmanFilter::PredictAndUpdate(
+ double frame_delay_variation_ms,
+ double frame_size_variation_bytes,
+ double max_frame_size_bytes,
+ double var_noise) {
+ // Sanity checks.
+ if (max_frame_size_bytes < 1) {
+ return;
+ }
+ if (var_noise <= 0.0) {
+ return;
+ }
+
+ // This member function follows the data flow in
+ // https://en.wikipedia.org/wiki/Kalman_filter#Details.
+
+ // 1) Estimate prediction: `x = F*x`.
+ // For this model, there is no need to explicitly predict the estimate, since
+ // the state transition matrix is the identity.
+
+ // 2) Estimate covariance prediction: `P = F*P*F' + Q`.
+ // Again, since the state transition matrix is the identity, this update
+ // is performed by simply adding the process noise covariance.
+ estimate_cov_[0][0] += process_noise_cov_diag_[0];
+ estimate_cov_[1][1] += process_noise_cov_diag_[1];
+
+ // 3) Innovation: `y = z - H*x`.
+ // This is the part of the measurement that cannot be explained by the current
+ // estimate.
+ double innovation =
+ frame_delay_variation_ms -
+ GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes);
+
+ // 4) Innovation variance: `s = H*P*H' + r`.
+ double estim_cov_times_obs[2];
+ estim_cov_times_obs[0] =
+ estimate_cov_[0][0] * frame_size_variation_bytes + estimate_cov_[0][1];
+ estim_cov_times_obs[1] =
+ estimate_cov_[1][0] * frame_size_variation_bytes + estimate_cov_[1][1];
+ double observation_noise_stddev =
+ (300.0 * exp(-fabs(frame_size_variation_bytes) /
+ (1e0 * max_frame_size_bytes)) +
+ 1) *
+ sqrt(var_noise);
+ if (observation_noise_stddev < 1.0) {
+ observation_noise_stddev = 1.0;
+ }
+ // TODO(brandtr): Shouldn't we add observation_noise_stddev^2 here? Otherwise,
+ // the dimensional analysis fails.
+ double innovation_var = frame_size_variation_bytes * estim_cov_times_obs[0] +
+ estim_cov_times_obs[1] + observation_noise_stddev;
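+  // Guard against a (near-)zero innovation variance, which would blow up the
+  // Kalman gain computed below.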
+ if ((innovation_var < 1e-9 && innovation_var >= 0) ||
+ (innovation_var > -1e-9 && innovation_var <= 0)) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+
+ // 5) Optimal Kalman gain: `K = P*H'/s`.
+ // How much to trust the model vs. how much to trust the measurement.
+ double kalman_gain[2];
+ kalman_gain[0] = estim_cov_times_obs[0] / innovation_var;
+ kalman_gain[1] = estim_cov_times_obs[1] / innovation_var;
+
+ // 6) Estimate update: `x = x + K*y`.
+ // Optimally weight the new information in the innovation and add it to the
+ // old estimate.
+ estimate_[0] += kalman_gain[0] * innovation;
+ estimate_[1] += kalman_gain[1] * innovation;
+
+ // (This clamping is not part of the linear Kalman filter.)
+ if (estimate_[0] < kMaxBandwidth) {
+ estimate_[0] = kMaxBandwidth;
+ }
+
+ // 7) Estimate covariance update: `P = (I - K*H)*P`
+ double t00 = estimate_cov_[0][0];
+ double t01 = estimate_cov_[0][1];
+ estimate_cov_[0][0] =
+ (1 - kalman_gain[0] * frame_size_variation_bytes) * t00 -
+ kalman_gain[0] * estimate_cov_[1][0];
+ estimate_cov_[0][1] =
+ (1 - kalman_gain[0] * frame_size_variation_bytes) * t01 -
+ kalman_gain[0] * estimate_cov_[1][1];
+ estimate_cov_[1][0] = estimate_cov_[1][0] * (1 - kalman_gain[1]) -
+ kalman_gain[1] * frame_size_variation_bytes * t00;
+ estimate_cov_[1][1] = estimate_cov_[1][1] * (1 - kalman_gain[1]) -
+ kalman_gain[1] * frame_size_variation_bytes * t01;
+
+ // Covariance matrix, must be positive semi-definite.
+ RTC_DCHECK(estimate_cov_[0][0] + estimate_cov_[1][1] >= 0 &&
+ estimate_cov_[0][0] * estimate_cov_[1][1] -
+ estimate_cov_[0][1] * estimate_cov_[1][0] >=
+ 0 &&
+ estimate_cov_[0][0] >= 0);
+}
+
+double FrameDelayVariationKalmanFilter::GetFrameDelayVariationEstimateSizeBased(
+ double frame_size_variation_bytes) const {
+ // Unit: [1 / bytes per millisecond] * [bytes] = [milliseconds].
+ return estimate_[0] * frame_size_variation_bytes;
+}
+
+double FrameDelayVariationKalmanFilter::GetFrameDelayVariationEstimateTotal(
+ double frame_size_variation_bytes) const {
+ double frame_transmission_delay_ms =
+ GetFrameDelayVariationEstimateSizeBased(frame_size_variation_bytes);
+ double link_queuing_delay_ms = estimate_[1];
+ return frame_transmission_delay_ms + link_queuing_delay_ms;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.h b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.h
new file mode 100644
index 0000000000..a65ceefa10
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_FRAME_DELAY_VARIATION_KALMAN_FILTER_H_
+#define MODULES_VIDEO_CODING_TIMING_FRAME_DELAY_VARIATION_KALMAN_FILTER_H_
+
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+// This class uses a linear Kalman filter (see
+// https://en.wikipedia.org/wiki/Kalman_filter) to estimate the frame delay
+// variation (i.e., the difference in transmission time between a frame and the
+// prior frame) for a frame, given its size variation in bytes (i.e., the
+// difference in size between a frame and the prior frame). The idea is that,
+// given a fixed link bandwidth, a larger frame (in bytes) would take
+// proportionally longer to arrive than a correspondingly smaller frame. Using
+// the variations of frame delay and frame size, the underlying bandwidth and
+// queuing delay variation of the network link can be estimated.
+//
+// The filter takes as input the frame delay variation, the difference between
+// the actual inter-frame arrival time and the expected inter-frame arrival time
+// (based on RTP timestamp), and frame size variation, the inter-frame size
+// delta for a single frame. The frame delay variation is seen as the
+// measurement and the frame size variation is used in the observation model.
+// The hidden state of the filter is the link bandwidth and queuing delay
+// buildup. The estimated state can be used to get the expected frame delay
+// variation for a frame, given its frame size variation. This information can
+// then be used to estimate the frame delay variation coming from network
+// jitter.
+//
+// Mathematical details:
+// * The state (`x` in Wikipedia notation) is a 2x1 vector comprising the
+// reciprocal of link bandwidth [1 / bytes per ms] and the
+// link queuing delay buildup [ms].
+// * The state transition matrix (`F`) is the 2x2 identity matrix, meaning that
+// link bandwidth and link queuing delay buildup are modeled as independent.
+// * The measurement (`z`) is the (scalar) frame delay variation [ms].
+// * The observation matrix (`H`) is a 1x2 vector set as
+// `{frame_size_variation [bytes], 1.0}`.
+// * The state estimate covariance (`P`) is a symmetric 2x2 matrix.
+// * The process noise covariance (`Q`) is a constant 2x2 diagonal matrix
+// [(1 / bytes per ms)^2, ms^2].
+// * The observation noise covariance (`r`) is a scalar [ms^2] that is
+// determined externally to this class.
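+//
+// In scalar form the predicted measurement is therefore
+//   z_hat = H * x = frame_size_variation * (1 / bandwidth) + queuing_delay,
+// and each update is driven by the innovation `z - z_hat` (see
+// GetFrameDelayVariationEstimateTotal()).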
+class FrameDelayVariationKalmanFilter {
+ public:
+ FrameDelayVariationKalmanFilter();
+ ~FrameDelayVariationKalmanFilter() = default;
+
+ // Predicts and updates the filter, given a new pair of frame delay variation
+ // and frame size variation.
+ //
+ // Inputs:
+ // `frame_delay_variation_ms`:
+ // Frame delay variation as calculated by the `InterFrameDelay` estimator.
+ //
+ // `frame_size_variation_bytes`:
+ // Frame size variation, i.e., the current frame size minus the previous
+ // frame size (in bytes). Note that this quantity may be negative.
+ //
+ // `max_frame_size_bytes`:
+ // Filtered largest frame size received since the last reset.
+ //
+ // `var_noise`:
+ // Variance of the estimated random jitter.
+ //
+ // TODO(bugs.webrtc.org/14381): For now use doubles as input parameters as
+ // units defined in api/units have insufficient underlying precision for
+ // jitter estimation.
+ void PredictAndUpdate(double frame_delay_variation_ms,
+ double frame_size_variation_bytes,
+ double max_frame_size_bytes,
+ double var_noise);
+
+ // Given a frame size variation, returns the estimated frame delay variation
+ // explained by the link bandwidth alone.
+ double GetFrameDelayVariationEstimateSizeBased(
+ double frame_size_variation_bytes) const;
+
+ // Given a frame size variation, returns the estimated frame delay variation
+ // explained by both link bandwidth and link queuing delay buildup.
+ double GetFrameDelayVariationEstimateTotal(
+ double frame_size_variation_bytes) const;
+
+ private:
+ // State estimate (bandwidth [1 / bytes per ms], queue buildup [ms]).
+ double estimate_[2];
+ double estimate_cov_[2][2]; // Estimate covariance.
+
+ // Process noise covariance. This is a diagonal matrix, so we only store the
+ // diagonal entries.
+ double process_noise_cov_diag_[2];
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_FRAME_DELAY_VARIATION_KALMAN_FILTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_gn/moz.build
new file mode 100644
index 0000000000..ecdcfc93e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_delay_variation_kalman_filter_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_unittest.cc
new file mode 100644
index 0000000000..6103f3a1bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/frame_delay_variation_kalman_filter_unittest.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/frame_delay_variation_kalman_filter.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// This test verifies that the initial filter state (link bandwidth, link
+// propagation delay) is such that a frame of size zero would take no time to
+// propagate.
+TEST(FrameDelayVariationKalmanFilterTest,
+ InitializedFilterWithZeroSizeFrameTakesNoTimeToPropagate) {
+ FrameDelayVariationKalmanFilter filter;
+
+ // A zero-sized frame...
+ double frame_size_variation_bytes = 0.0;
+
+  // ...should take no time to propagate due to its size...
+ EXPECT_EQ(filter.GetFrameDelayVariationEstimateSizeBased(
+ frame_size_variation_bytes),
+ 0.0);
+
+ // ...and no time due to the initial link propagation delay being zero.
+ EXPECT_EQ(
+ filter.GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes),
+ 0.0);
+}
+
+// TODO(brandtr): Look into whether there is a factor 1000 missing here; an
+// initial link bandwidth of 512 _mega_bits per second seems unreasonably high.
+TEST(FrameDelayVariationKalmanFilterTest,
+ InitializedFilterWithSmallSizeFrameTakesFixedTimeToPropagate) {
+ FrameDelayVariationKalmanFilter filter;
+
+ // A 1000-byte frame...
+ double frame_size_variation_bytes = 1000.0;
+ // ...should take around `1000.0 / (512e3 / 8.0) = 0.015625 ms` to transmit.
+ double expected_frame_delay_variation_estimate_ms = 1000.0 / (512e3 / 8.0);
+
+ EXPECT_EQ(filter.GetFrameDelayVariationEstimateSizeBased(
+ frame_size_variation_bytes),
+ expected_frame_delay_variation_estimate_ms);
+ EXPECT_EQ(
+ filter.GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes),
+ expected_frame_delay_variation_estimate_ms);
+}
+
+TEST(FrameDelayVariationKalmanFilterTest,
+ NegativeNoiseVarianceDoesNotUpdateFilter) {
+ FrameDelayVariationKalmanFilter filter;
+
+ // Negative variance...
+ double var_noise = -0.1;
+ filter.PredictAndUpdate(/*frame_delay_variation_ms=*/3,
+ /*frame_size_variation_bytes=*/200.0,
+ /*max_frame_size_bytes=*/2000, var_noise);
+
+ // ...does _not_ update the filter.
+ EXPECT_EQ(filter.GetFrameDelayVariationEstimateTotal(
+ /*frame_size_variation_bytes=*/0.0),
+ 0.0);
+
+ // Positive variance...
+ var_noise = 0.1;
+ filter.PredictAndUpdate(/*frame_delay_variation_ms=*/3,
+ /*frame_size_variation_bytes=*/200.0,
+ /*max_frame_size_bytes=*/2000, var_noise);
+
+ // ...does update the filter.
+ EXPECT_GT(filter.GetFrameDelayVariationEstimateTotal(
+ /*frame_size_variation_bytes=*/0.0),
+ 0.0);
+}
+
+TEST(FrameDelayVariationKalmanFilterTest,
+ VerifyConvergenceWithAlternatingDeviations) {
+ FrameDelayVariationKalmanFilter filter;
+
+ // One frame every 33 ms.
+ int framerate_fps = 30;
+ // Let's assume approximately 10% delay variation.
+ double frame_delay_variation_ms = 3;
+ // With a bitrate of 512 kbps, each frame will be around 2000 bytes.
+ double max_frame_size_bytes = 2000;
+ // And again, let's assume 10% size deviation.
+ double frame_size_variation_bytes = 200;
+ double var_noise = 0.1;
+ int test_duration_s = 60;
+
+ for (int i = 0; i < test_duration_s * framerate_fps; ++i) {
+ // For simplicity, assume alternating variations.
+ double sign = (i % 2 == 0) ? 1.0 : -1.0;
+ filter.PredictAndUpdate(sign * frame_delay_variation_ms,
+ sign * frame_size_variation_bytes,
+ max_frame_size_bytes, var_noise);
+ }
+
+ // Verify that the filter has converged within a margin of 0.1 ms.
+ EXPECT_NEAR(
+ filter.GetFrameDelayVariationEstimateTotal(frame_size_variation_bytes),
+ frame_delay_variation_ms, 0.1);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc
new file mode 100644
index 0000000000..bed9f875ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/inter_frame_delay.h"
+
+#include "absl/types/optional.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+namespace {
+constexpr Frequency k90kHz = Frequency::KiloHertz(90);
+}  // namespace
+
+InterFrameDelay::InterFrameDelay() {
+ Reset();
+}
+
+// Resets the delay estimate.
+void InterFrameDelay::Reset() {
+ prev_wall_clock_ = absl::nullopt;
+ prev_rtp_timestamp_unwrapped_ = 0;
+}
+
+// Calculates the delay of a frame with the given timestamp.
+// This method is called when the frame is complete.
+absl::optional<TimeDelta> InterFrameDelay::CalculateDelay(
+ uint32_t rtp_timestamp,
+ Timestamp now) {
+ int64_t rtp_timestamp_unwrapped = unwrapper_.Unwrap(rtp_timestamp);
+ if (!prev_wall_clock_) {
+ // First set of data, initialization, wait for next frame.
+ prev_wall_clock_ = now;
+ prev_rtp_timestamp_unwrapped_ = rtp_timestamp_unwrapped;
+ return TimeDelta::Zero();
+ }
+
+  // TODO: Consider accounting for reordering in the jitter variance estimate.
+ // Note that this also captures incomplete frames which are grabbed for
+ // decoding after a later frame has been complete, i.e. real packet losses.
+ uint32_t cropped_last = static_cast<uint32_t>(prev_rtp_timestamp_unwrapped_);
+ if (rtp_timestamp_unwrapped < prev_rtp_timestamp_unwrapped_ ||
+ !IsNewerTimestamp(rtp_timestamp, cropped_last)) {
+ return absl::nullopt;
+ }
+
+ // Compute the compensated timestamp difference.
+ int64_t d_rtp_ticks = rtp_timestamp_unwrapped - prev_rtp_timestamp_unwrapped_;
+ TimeDelta dts = d_rtp_ticks / k90kHz;
+ TimeDelta dt = now - *prev_wall_clock_;
+
+  // The frame delay variation is the difference between the wall clock delta
+  // (`dt`) and the RTP timestamp delta converted to wall clock time (`dts`)
+  // for two consecutive frames.
+ TimeDelta delay = dt - dts;
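+  // Example: frames whose RTP timestamps are 3000 ticks apart nominally span
+  // 3000 / 90 kHz = 33.3 ms; if they arrive 40 ms apart in wall clock time,
+  // the delay variation is +6.7 ms.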
+
+ prev_rtp_timestamp_unwrapped_ = rtp_timestamp_unwrapped;
+ prev_wall_clock_ = now;
+ return delay;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h
new file mode 100644
index 0000000000..03b5f78cc1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_INTER_FRAME_DELAY_H_
+#define MODULES_VIDEO_CODING_TIMING_INTER_FRAME_DELAY_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class InterFrameDelay {
+ public:
+ InterFrameDelay();
+
+  // Resets the estimate to its initial state.
+ void Reset();
+
+ // Calculates the delay of a frame with the given timestamp.
+ // This method is called when the frame is complete.
+ absl::optional<TimeDelta> CalculateDelay(uint32_t rtp_timestamp,
+ Timestamp now);
+
+ private:
+  // The previous RTP timestamp passed to the delay estimate.
+ int64_t prev_rtp_timestamp_unwrapped_;
+ RtpTimestampUnwrapper unwrapper_;
+
+  // The previous wall clock timestamp used by the delay estimate.
+ absl::optional<Timestamp> prev_wall_clock_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_INTER_FRAME_DELAY_H_
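
The header pulls in RtpTimestampUnwrapper because a 32-bit RTP timestamp at
90 kHz wraps roughly every 13.25 hours (2^32 / 90000 ~= 47722 s). Below is a
rough sketch of what the unwrapping buys, using the same Unwrap() call as the
implementation above; the timestamp values are illustrative.

    #include <cstdint>
    #include <limits>

    #include "rtc_base/numerics/sequence_number_unwrapper.h"

    // Two frames one frame period (3000 ticks) apart that straddle the 32-bit
    // wrap point. A raw uint32_t comparison would call the second frame
    // "older"; the unwrapper extends both onto a signed 64-bit timeline.
    void UnwrapSketch() {
      webrtc::RtpTimestampUnwrapper unwrapper;
      const uint32_t before_wrap = std::numeric_limits<uint32_t>::max() - 1500;
      const uint32_t after_wrap = before_wrap + 3000;  // Wraps around to 1499.

      const int64_t a = unwrapper.Unwrap(before_wrap);
      const int64_t b = unwrapper.Unwrap(after_wrap);
      // b - a == 3000, even though after_wrap < before_wrap as raw uint32_t.
    }
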
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build
new file mode 100644
index 0000000000..765d42aade
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("inter_frame_delay_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc
new file mode 100644
index 0000000000..183b378ced
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/inter_frame_delay_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/inter_frame_delay.h"
+
+#include <limits>
+
+#include "absl/types/optional.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Tests run with frames at 30fps. At 30fps, RTP timestamps will increase by
+// 90000 / 30 = 3000 ticks per frame.
+constexpr Frequency k30Fps = Frequency::Hertz(30);
+constexpr TimeDelta kFrameDelay = 1 / k30Fps;
+constexpr uint32_t kRtpTicksPerFrame = Frequency::KiloHertz(90) / k30Fps;
+constexpr Timestamp kStartTime = Timestamp::Millis(1337);
+
+} // namespace
+
+using ::testing::Eq;
+using ::testing::Optional;
+
+TEST(InterFrameDelayTest, OldRtpTimestamp) {
+ InterFrameDelay inter_frame_delay;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(180000, kStartTime),
+ Optional(TimeDelta::Zero()));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(90000, kStartTime),
+ Eq(absl::nullopt));
+}
+
+TEST(InterFrameDelayTest, NegativeWrapAroundIsSameAsOldRtpTimestamp) {
+ InterFrameDelay inter_frame_delay;
+ uint32_t rtp = 1500;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, kStartTime),
+ Optional(TimeDelta::Zero()));
+ // RTP has wrapped around backwards.
+ rtp -= 3000;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, kStartTime),
+ Eq(absl::nullopt));
+}
+
+TEST(InterFrameDelayTest, CorrectDelayForFrames) {
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+
+ // First frame is always delay 0.
+ uint32_t rtp = 90000;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Perfectly timed frame has 0 delay.
+ clock.AdvanceTime(kFrameDelay);
+ rtp += kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Slightly early frame will have a negative delay.
+ clock.AdvanceTime(kFrameDelay - TimeDelta::Millis(3));
+ rtp += kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-TimeDelta::Millis(3)));
+
+ // Slightly late frame will have positive delay.
+ clock.AdvanceTime(kFrameDelay + TimeDelta::Micros(5125));
+ rtp += kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Micros(5125)));
+
+  // Simulate a higher frame rate on the RTP clock (half the usual ticks per
+  // frame) at the same wall clock delay. The frame arrives late relative to
+  // its RTP timestamp, so the delay is positive.
+ clock.AdvanceTime(kFrameDelay);
+ rtp += kRtpTicksPerFrame / 2;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(kFrameDelay / 2.0));
+
+  // Simulate a lower frame rate on the RTP clock (1.5x the usual ticks per
+  // frame) at the same wall clock delay. The frame arrives early relative to
+  // its RTP timestamp, so the delay is negative.
+ clock.AdvanceTime(kFrameDelay);
+ rtp += 1.5 * kRtpTicksPerFrame;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-kFrameDelay / 2.0));
+}
+
+TEST(InterFrameDelayTest, PositiveWrapAround) {
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+
+ // First frame is behind the max RTP by 1500.
+ uint32_t rtp = std::numeric_limits<uint32_t>::max() - 1500;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Rtp wraps around, now 1499.
+ rtp += kRtpTicksPerFrame;
+
+ // Frame delay should be as normal, in this case simulated as 1ms late.
+ clock.AdvanceTime(kFrameDelay + TimeDelta::Millis(1));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Millis(1)));
+}
+
+TEST(InterFrameDelayTest, MultipleWrapArounds) {
+  // Simulate long pauses which cause the RTP timestamp to wrap around
+  // multiple times.
+ constexpr Frequency k90Khz = Frequency::KiloHertz(90);
+ constexpr uint32_t kHalfRtp = std::numeric_limits<uint32_t>::max() / 2;
+ constexpr TimeDelta kWrapAroundDelay = kHalfRtp / k90Khz;
+
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+ uint32_t rtp = 0;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ rtp += kHalfRtp;
+ clock.AdvanceTime(kWrapAroundDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+  // 1st wrap-around.
+ rtp += kHalfRtp + 1;
+ clock.AdvanceTime(kWrapAroundDelay + TimeDelta::Millis(1));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Millis(1) - (1 / k90Khz)));
+
+ rtp += kHalfRtp;
+ clock.AdvanceTime(kWrapAroundDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+  // 2nd wrap-around.
+ rtp += kHalfRtp + 1;
+ clock.AdvanceTime(kWrapAroundDelay - TimeDelta::Millis(1));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-TimeDelta::Millis(1) - (1 / k90Khz)));
+
+  // Ensure that a short wall clock delay combined with a large RTP advance
+  // between wrap-arounds produces the correct (negative) delay.
+ rtp += kHalfRtp;
+ clock.AdvanceTime(TimeDelta::Millis(10));
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-(kWrapAroundDelay - TimeDelta::Millis(10))));
+  // 3rd wrap-around, this time with a large RTP delay.
+ rtp += kHalfRtp + 1;
+ clock.AdvanceTime(TimeDelta::Millis(10));
+ EXPECT_THAT(
+ inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(-(kWrapAroundDelay - TimeDelta::Millis(10) + (1 / k90Khz))));
+}
+
+TEST(InterFrameDelayTest, NegativeWrapAroundAfterPositiveWrapAround) {
+ InterFrameDelay inter_frame_delay;
+ // Use a fake clock to simplify time keeping.
+ SimulatedClock clock(kStartTime);
+ uint32_t rtp = std::numeric_limits<uint32_t>::max() - 1500;
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Rtp wraps around, now 1499.
+ rtp += kRtpTicksPerFrame;
+  // Frame delay should be as normal, in this case on time with zero delay.
+ clock.AdvanceTime(kFrameDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Optional(TimeDelta::Zero()));
+
+ // Wrap back.
+ rtp -= kRtpTicksPerFrame;
+  // A backwards wrap is treated as an old timestamp, so no delay is returned.
+ clock.AdvanceTime(kFrameDelay);
+ EXPECT_THAT(inter_frame_delay.CalculateDelay(rtp, clock.CurrentTime()),
+ Eq(absl::nullopt));
+}
+
+} // namespace webrtc
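
For reference, the magnitudes exercised by the wrap-around tests follow
directly from the 90 kHz clock. A small sketch of the arithmetic (values
rounded down by integer division):

    #include <cstdint>
    #include <limits>

    // Half the 32-bit RTP timestamp space, as used by MultipleWrapArounds.
    constexpr uint32_t kHalfRtp = std::numeric_limits<uint32_t>::max() / 2;

    // At 90000 ticks per second, advancing by kHalfRtp corresponds to about
    // 2147483647 / 90000 ~= 23861 seconds, i.e. roughly 6.6 hours of media
    // time between the simulated wrap-arounds.
    constexpr int64_t kWrapAroundSeconds = kHalfRtp / 90000;
    static_assert(kWrapAroundSeconds == 23860, "seconds per half wrap");
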
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc
new file mode 100644
index 0000000000..62757787a1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/jitter_estimator.h"
+
+#include <math.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/video_coding/timing/rtt_filter.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+
+// Number of frames to wait before post-processing the estimate. Also used in
+// the frame rate estimator ramp-up.
+constexpr size_t kFrameProcessingStartupCount = 30;
+
+// Number of frames to wait before enabling the frame size filters.
+constexpr size_t kFramesUntilSizeFiltering = 5;
+
+// Initial value for frame size filters.
+constexpr double kInitialAvgAndMaxFrameSizeBytes = 500.0;
+
+// Time constant for average frame size filter.
+constexpr double kPhi = 0.97;
+// Time constant for max frame size filter.
+constexpr double kPsi = 0.9999;
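+// Illustration: with kPhi = 0.97 the average frame size filter has an
+// effective memory of roughly 1 / (1 - kPhi) ~= 33 frames (about one second
+// at 30 fps), while kPsi = 0.9999 lets the max frame size estimate decay by
+// only ~0.3% per second at 30 fps (0.9999^30 ~= 0.997).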
+// Default constants for percentile frame size filter.
+constexpr double kDefaultMaxFrameSizePercentile = 0.95;
+constexpr int kDefaultFrameSizeWindow = 30 * 10;
+
+// Outlier rejection constants.
+constexpr double kNumStdDevDelayClamp = 3.5;
+constexpr double kNumStdDevDelayOutlier = 15.0;
+constexpr double kNumStdDevSizeOutlier = 3.0;
+constexpr double kCongestionRejectionFactor = -0.25;
+
+// Rampup constant for deviation noise filters.
+constexpr size_t kAlphaCountMax = 400;
+
+// Noise threshold constants.
+// Less than a ~1% chance (per the normal distribution table)...
+constexpr double kNoiseStdDevs = 2.33;
+// ...of getting freezes longer than this 30 ms offset.
+constexpr double kNoiseStdDevOffset = 30.0;
+
+// Jitter estimate clamping limits.
+constexpr TimeDelta kMinJitterEstimate = TimeDelta::Millis(1);
+constexpr TimeDelta kMaxJitterEstimate = TimeDelta::Seconds(10);
+
+// A constant describing additional delay on the receiving side which is not
+// accounted for by the jitter buffer nor the decoding delay estimate.
+constexpr TimeDelta kOperatingSystemJitter = TimeDelta::Millis(10);
+
+// Time constant for resetting the NACK count.
+constexpr TimeDelta kNackCountTimeout = TimeDelta::Seconds(60);
+
+// RTT mult activation.
+constexpr size_t kNackLimit = 3;
+
+// Frame rate estimate clamping limit.
+constexpr Frequency kMaxFramerateEstimate = Frequency::Hertz(200);
+
+} // namespace
+
+constexpr char JitterEstimator::Config::kFieldTrialsKey[];
+
+JitterEstimator::Config JitterEstimator::Config::ParseAndValidate(
+ absl::string_view field_trial) {
+ Config config;
+ config.Parser()->Parse(field_trial);
+
+  // The `MovingPercentileFilter` RTC_CHECKs the validity of the percentile
+  // and window length, so we'd better validate the field-trial-provided
+  // values here.
+ if (config.max_frame_size_percentile) {
+ double original = *config.max_frame_size_percentile;
+ config.max_frame_size_percentile = std::min(std::max(0.0, original), 1.0);
+ if (config.max_frame_size_percentile != original) {
+ RTC_LOG(LS_ERROR) << "Skipping invalid max_frame_size_percentile="
+ << original;
+ }
+ }
+ if (config.frame_size_window && config.frame_size_window < 1) {
+ RTC_LOG(LS_ERROR) << "Skipping invalid frame_size_window="
+ << *config.frame_size_window;
+ config.frame_size_window = 1;
+ }
+
+ // General sanity checks.
+ if (config.num_stddev_delay_clamp && config.num_stddev_delay_clamp < 0.0) {
+ RTC_LOG(LS_ERROR) << "Skipping invalid num_stddev_delay_clamp="
+ << *config.num_stddev_delay_clamp;
+ config.num_stddev_delay_clamp = 0.0;
+ }
+ if (config.num_stddev_delay_outlier &&
+ config.num_stddev_delay_outlier < 0.0) {
+ RTC_LOG(LS_ERROR) << "Skipping invalid num_stddev_delay_outlier="
+ << *config.num_stddev_delay_outlier;
+ config.num_stddev_delay_outlier = 0.0;
+ }
+ if (config.num_stddev_size_outlier && config.num_stddev_size_outlier < 0.0) {
+ RTC_LOG(LS_ERROR) << "Skipping invalid num_stddev_size_outlier="
+ << *config.num_stddev_size_outlier;
+ config.num_stddev_size_outlier = 0.0;
+ }
+
+ return config;
+}
+
+JitterEstimator::JitterEstimator(Clock* clock,
+ const FieldTrialsView& field_trials)
+ : config_(Config::ParseAndValidate(
+ field_trials.Lookup(Config::kFieldTrialsKey))),
+ avg_frame_size_median_bytes_(static_cast<size_t>(
+ config_.frame_size_window.value_or(kDefaultFrameSizeWindow))),
+ max_frame_size_bytes_percentile_(
+ config_.max_frame_size_percentile.value_or(
+ kDefaultMaxFrameSizePercentile),
+ static_cast<size_t>(
+ config_.frame_size_window.value_or(kDefaultFrameSizeWindow))),
+ fps_counter_(30), // TODO(sprang): Use an estimator with limit based
+ // on time, rather than number of samples.
+ clock_(clock) {
+ Reset();
+}
+
+JitterEstimator::~JitterEstimator() = default;
+
+// Resets the JitterEstimate.
+void JitterEstimator::Reset() {
+ avg_frame_size_bytes_ = kInitialAvgAndMaxFrameSizeBytes;
+ max_frame_size_bytes_ = kInitialAvgAndMaxFrameSizeBytes;
+ var_frame_size_bytes2_ = 100;
+ avg_frame_size_median_bytes_.Reset();
+ max_frame_size_bytes_percentile_.Reset();
+ last_update_time_ = absl::nullopt;
+ prev_estimate_ = absl::nullopt;
+ prev_frame_size_ = absl::nullopt;
+ avg_noise_ms_ = 0.0;
+ var_noise_ms2_ = 4.0;
+ alpha_count_ = 1;
+ filter_jitter_estimate_ = TimeDelta::Zero();
+ latest_nack_ = Timestamp::Zero();
+ nack_count_ = 0;
+ startup_frame_size_sum_bytes_ = 0;
+ startup_frame_size_count_ = 0;
+ startup_count_ = 0;
+ rtt_filter_.Reset();
+ fps_counter_.Reset();
+
+ kalman_filter_ = FrameDelayVariationKalmanFilter();
+}
+
+// Updates the estimates with the new measurements.
+void JitterEstimator::UpdateEstimate(TimeDelta frame_delay,
+ DataSize frame_size) {
+ if (frame_size.IsZero()) {
+ return;
+ }
+ // Can't use DataSize since this can be negative.
+ double delta_frame_bytes =
+ frame_size.bytes() - prev_frame_size_.value_or(DataSize::Zero()).bytes();
+ if (startup_frame_size_count_ < kFramesUntilSizeFiltering) {
+ startup_frame_size_sum_bytes_ += frame_size.bytes();
+ startup_frame_size_count_++;
+ } else if (startup_frame_size_count_ == kFramesUntilSizeFiltering) {
+    // Seed the frame size filter with the average of the startup frames.
+ avg_frame_size_bytes_ = startup_frame_size_sum_bytes_ /
+ static_cast<double>(startup_frame_size_count_);
+ startup_frame_size_count_++;
+ }
+
+ double avg_frame_size_bytes =
+ kPhi * avg_frame_size_bytes_ + (1 - kPhi) * frame_size.bytes();
+ double deviation_size_bytes = 2 * sqrt(var_frame_size_bytes2_);
+ if (frame_size.bytes() < avg_frame_size_bytes_ + deviation_size_bytes) {
+ // Only update the average frame size if this sample wasn't a key frame.
+ avg_frame_size_bytes_ = avg_frame_size_bytes;
+ }
+
+ double delta_bytes = frame_size.bytes() - avg_frame_size_bytes;
+ var_frame_size_bytes2_ = std::max(
+ kPhi * var_frame_size_bytes2_ + (1 - kPhi) * (delta_bytes * delta_bytes),
+ 1.0);
+
+ // Update non-linear IIR estimate of max frame size.
+ max_frame_size_bytes_ =
+ std::max<double>(kPsi * max_frame_size_bytes_, frame_size.bytes());
+
+ // Maybe update percentile estimates of frame sizes.
+ if (config_.avg_frame_size_median) {
+ avg_frame_size_median_bytes_.Insert(frame_size.bytes());
+ }
+ if (config_.MaxFrameSizePercentileEnabled()) {
+ max_frame_size_bytes_percentile_.Insert(frame_size.bytes());
+ }
+
+ if (!prev_frame_size_) {
+ prev_frame_size_ = frame_size;
+ return;
+ }
+ prev_frame_size_ = frame_size;
+
+ // Cap frame_delay based on the current time deviation noise.
+ double num_stddev_delay_clamp =
+ config_.num_stddev_delay_clamp.value_or(kNumStdDevDelayClamp);
+ TimeDelta max_time_deviation =
+ TimeDelta::Millis(num_stddev_delay_clamp * sqrt(var_noise_ms2_) + 0.5);
+ frame_delay.Clamp(-max_time_deviation, max_time_deviation);
+
+ double delay_deviation_ms =
+ frame_delay.ms() -
+ kalman_filter_.GetFrameDelayVariationEstimateTotal(delta_frame_bytes);
+
+ // Outlier rejection: these conditions depend on filtered versions of the
+ // delay and frame size _means_, respectively, together with a configurable
+ // number of standard deviations. If a sample is large with respect to the
+ // corresponding mean and dispersion (defined by the number of
+ // standard deviations and the sample standard deviation), it is deemed an
+ // outlier. This "empirical rule" is further described in
+ // https://en.wikipedia.org/wiki/68-95-99.7_rule. Note that neither of the
+  // estimated means is a true sample mean, which implies the samples are
+ // not normally distributed. Hence, this rejection method is just a heuristic.
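+  // As an illustration (with made-up numbers): if var_noise_ms2_ is 25, the
+  // noise stddev is 5 ms, so with the default num_stddev_delay_outlier of 15
+  // a sample counts as a delay outlier only when it deviates from the Kalman
+  // estimate by more than 75 ms.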
+ double num_stddev_delay_outlier =
+ config_.num_stddev_delay_outlier.value_or(kNumStdDevDelayOutlier);
+ // Delay outlier rejection is two-sided.
+ bool abs_delay_is_not_outlier =
+ fabs(delay_deviation_ms) <
+ num_stddev_delay_outlier * sqrt(var_noise_ms2_);
+ // The reasoning above means, in particular, that we should use the sample
+ // mean-style `avg_frame_size_bytes_` estimate, as opposed to the
+  // median-filtered version, even if configured to use the latter for the
+ // calculation in `CalculateEstimate()`.
+ // Size outlier rejection is one-sided.
+ double num_stddev_size_outlier =
+ config_.num_stddev_size_outlier.value_or(kNumStdDevSizeOutlier);
+ bool size_is_positive_outlier =
+ frame_size.bytes() >
+ avg_frame_size_bytes_ +
+ num_stddev_size_outlier * sqrt(var_frame_size_bytes2_);
+
+ // Only update the Kalman filter if the sample is not considered an extreme
+ // outlier. Even if it is an extreme outlier from a delay point of view, if
+ // the frame size also is large the deviation is probably due to an incorrect
+ // line slope.
+ if (abs_delay_is_not_outlier || size_is_positive_outlier) {
+    // Prevent updating with frames which have been congested by a large frame,
+    // and therefore arrive almost at the same time as that frame.
+    // This can occur when we receive a large frame (key frame) which has been
+    // delayed. The next frame is of normal size (delta frame), and thus
+    // `delta_frame_bytes` will be << 0. This rejects all frame samples which
+    // arrive right after a key frame.
+ double congestion_rejection_factor =
+ config_.congestion_rejection_factor.value_or(
+ kCongestionRejectionFactor);
+ double filtered_max_frame_size_bytes =
+ config_.MaxFrameSizePercentileEnabled()
+ ? max_frame_size_bytes_percentile_.GetFilteredValue()
+ : max_frame_size_bytes_;
+ bool is_not_congested =
+ delta_frame_bytes >
+ congestion_rejection_factor * filtered_max_frame_size_bytes;
+
+ if (is_not_congested || config_.estimate_noise_when_congested) {
+ // Update the variance of the deviation from the line given by the Kalman
+ // filter.
+ EstimateRandomJitter(delay_deviation_ms);
+ }
+ if (is_not_congested) {
+ // Neither a delay outlier nor a congested frame, so we can safely update
+ // the Kalman filter with the sample.
+ kalman_filter_.PredictAndUpdate(frame_delay.ms(), delta_frame_bytes,
+ filtered_max_frame_size_bytes,
+ var_noise_ms2_);
+ }
+ } else {
+ // Delay outliers affect the noise estimate through a value equal to the
+ // outlier rejection threshold.
+ double num_stddev = (delay_deviation_ms >= 0) ? num_stddev_delay_outlier
+ : -num_stddev_delay_outlier;
+ EstimateRandomJitter(num_stddev * sqrt(var_noise_ms2_));
+ }
+  // Post-process the total estimated jitter.
+ if (startup_count_ >= kFrameProcessingStartupCount) {
+ PostProcessEstimate();
+ } else {
+ startup_count_++;
+ }
+}
+
+// Updates the nack/packet ratio.
+void JitterEstimator::FrameNacked() {
+ if (nack_count_ < kNackLimit) {
+ nack_count_++;
+ }
+ latest_nack_ = clock_->CurrentTime();
+}
+
+void JitterEstimator::UpdateRtt(TimeDelta rtt) {
+ rtt_filter_.Update(rtt);
+}
+
+JitterEstimator::Config JitterEstimator::GetConfigForTest() const {
+ return config_;
+}
+
+// Estimates the random jitter by calculating the variance of the sample
+// distance from the line given by the Kalman filter.
+void JitterEstimator::EstimateRandomJitter(double d_dT) {
+ Timestamp now = clock_->CurrentTime();
+ if (last_update_time_.has_value()) {
+ fps_counter_.AddSample((now - *last_update_time_).us());
+ }
+ last_update_time_ = now;
+
+ if (alpha_count_ == 0) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+ double alpha =
+ static_cast<double>(alpha_count_ - 1) / static_cast<double>(alpha_count_);
+ alpha_count_++;
+ if (alpha_count_ > kAlphaCountMax)
+ alpha_count_ = kAlphaCountMax;
+
+  // To avoid a low frame rate stream reacting more slowly to changes, scale
+  // the alpha weight relative to a 30 fps stream.
+ Frequency fps = GetFrameRate();
+ if (fps > Frequency::Zero()) {
+ constexpr Frequency k30Fps = Frequency::Hertz(30);
+ double rate_scale = k30Fps / fps;
+ // At startup, there can be a lot of noise in the fps estimate.
+ // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
+ // at sample #kFrameProcessingStartupCount.
+ if (alpha_count_ < kFrameProcessingStartupCount) {
+ rate_scale = (alpha_count_ * rate_scale +
+ (kFrameProcessingStartupCount - alpha_count_)) /
+ kFrameProcessingStartupCount;
+ }
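+    // For example (illustrative numbers): at a steady 10 fps, rate_scale is
+    // 30 / 10 = 3, so alpha is cubed and the noise filters adapt three times
+    // faster than they would for a 30 fps stream.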
+ alpha = pow(alpha, rate_scale);
+ }
+
+ double avg_noise_ms = alpha * avg_noise_ms_ + (1 - alpha) * d_dT;
+ double var_noise_ms2 = alpha * var_noise_ms2_ + (1 - alpha) *
+ (d_dT - avg_noise_ms_) *
+ (d_dT - avg_noise_ms_);
+ avg_noise_ms_ = avg_noise_ms;
+ var_noise_ms2_ = var_noise_ms2;
+ if (var_noise_ms2_ < 1.0) {
+    // The variance should never be zero; if it were, we might get stuck and
+    // consider all samples outliers.
+ var_noise_ms2_ = 1.0;
+ }
+}
+
+double JitterEstimator::NoiseThreshold() const {
+ double noise_threshold_ms =
+ kNoiseStdDevs * sqrt(var_noise_ms2_) - kNoiseStdDevOffset;
+ if (noise_threshold_ms < 1.0) {
+ noise_threshold_ms = 1.0;
+ }
+ return noise_threshold_ms;
+}
+
+// Calculates the current jitter estimate from the filtered estimates.
+TimeDelta JitterEstimator::CalculateEstimate() {
+ // Using median- and percentile-filtered versions of the frame sizes may be
+ // more robust than using sample mean-style estimates.
+ double filtered_avg_frame_size_bytes =
+ config_.avg_frame_size_median
+ ? avg_frame_size_median_bytes_.GetFilteredValue()
+ : avg_frame_size_bytes_;
+ double filtered_max_frame_size_bytes =
+ config_.MaxFrameSizePercentileEnabled()
+ ? max_frame_size_bytes_percentile_.GetFilteredValue()
+ : max_frame_size_bytes_;
+ double worst_case_frame_size_deviation_bytes =
+ filtered_max_frame_size_bytes - filtered_avg_frame_size_bytes;
+ double ret_ms = kalman_filter_.GetFrameDelayVariationEstimateSizeBased(
+ worst_case_frame_size_deviation_bytes) +
+ NoiseThreshold();
+ TimeDelta ret = TimeDelta::Millis(ret_ms);
+
+  // A very low (or negative) estimate is ignored.
+ if (ret < kMinJitterEstimate) {
+ ret = prev_estimate_.value_or(kMinJitterEstimate);
+ // Sanity check to make sure that no other method has set `prev_estimate_`
+ // to a value lower than `kMinJitterEstimate`.
+ RTC_DCHECK_GE(ret, kMinJitterEstimate);
+ } else if (ret > kMaxJitterEstimate) { // Sanity
+ ret = kMaxJitterEstimate;
+ }
+ prev_estimate_ = ret;
+ return ret;
+}
+
+void JitterEstimator::PostProcessEstimate() {
+ filter_jitter_estimate_ = CalculateEstimate();
+}
+
+// Returns the current filtered estimate if available,
+// otherwise tries to calculate an estimate.
+TimeDelta JitterEstimator::GetJitterEstimate(
+ double rtt_multiplier,
+ absl::optional<TimeDelta> rtt_mult_add_cap) {
+  TimeDelta jitter = CalculateEstimate() + kOperatingSystemJitter;
+ Timestamp now = clock_->CurrentTime();
+
+ if (now - latest_nack_ > kNackCountTimeout)
+ nack_count_ = 0;
+
+ if (filter_jitter_estimate_ > jitter)
+ jitter = filter_jitter_estimate_;
+ if (nack_count_ >= kNackLimit) {
+ if (rtt_mult_add_cap.has_value()) {
+ jitter += std::min(rtt_filter_.Rtt() * rtt_multiplier,
+ rtt_mult_add_cap.value());
+ } else {
+ jitter += rtt_filter_.Rtt() * rtt_multiplier;
+ }
+ }
+
+ static const Frequency kJitterScaleLowThreshold = Frequency::Hertz(5);
+ static const Frequency kJitterScaleHighThreshold = Frequency::Hertz(10);
+ Frequency fps = GetFrameRate();
+ // Ignore jitter for very low fps streams.
+ if (fps < kJitterScaleLowThreshold) {
+ if (fps.IsZero()) {
+ return std::max(TimeDelta::Zero(), jitter);
+ }
+ return TimeDelta::Zero();
+ }
+
+ // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
+ // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
+ if (fps < kJitterScaleHighThreshold) {
+ jitter = (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
+ (fps - kJitterScaleLowThreshold) * jitter;
+ }
+
+ return std::max(TimeDelta::Zero(), jitter);
+}
+
+Frequency JitterEstimator::GetFrameRate() const {
+ TimeDelta mean_frame_period = TimeDelta::Micros(fps_counter_.ComputeMean());
+ if (mean_frame_period <= TimeDelta::Zero())
+ return Frequency::Zero();
+
+ Frequency fps = 1 / mean_frame_period;
+ // Sanity check.
+ RTC_DCHECK_GE(fps, Frequency::Zero());
+ return std::min(fps, kMaxFramerateEstimate);
+}
+} // namespace webrtc
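
Putting the estimator's pieces together: the value returned by
CalculateEstimate() is the Kalman filter's size-based delay term plus the
noise threshold, clamped to a sanity range. The sketch below reproduces that
composition with plain doubles; slope_ms_per_byte and all numeric inputs are
illustrative stand-ins for the filter state, and the low clamp is simplified
(the real code falls back to the previous estimate instead).

    #include <algorithm>
    #include <cmath>

    // Illustrative recomputation of CalculateEstimate() with plain doubles.
    double SketchJitterMs(double slope_ms_per_byte,
                          double max_frame_bytes,
                          double avg_frame_bytes,
                          double var_noise_ms2) {
      // Deterministic part: worst-case frame size deviation times the
      // estimated delay-per-byte slope.
      const double worst_case_bytes = max_frame_bytes - avg_frame_bytes;
      const double deterministic_ms = slope_ms_per_byte * worst_case_bytes;

      // NoiseThreshold(): 2.33 standard deviations minus the 30 ms offset,
      // floored at 1 ms.
      const double noise_ms =
          std::max(2.33 * std::sqrt(var_noise_ms2) - 30.0, 1.0);

      // Clamp to the [1 ms, 10 s] range used by CalculateEstimate().
      return std::min(std::max(deterministic_ms + noise_ms, 1.0), 10000.0);
    }

    // E.g. SketchJitterMs(0.005, 5000.0, 1000.0, 400.0) yields
    // 0.005 * 4000 + (2.33 * 20 - 30) = 20.0 + 16.6 = 36.6 ms.
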
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h
new file mode 100644
index 0000000000..a89a4bf1fd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_JITTER_ESTIMATOR_H_
+#define MODULES_VIDEO_CODING_TIMING_JITTER_ESTIMATOR_H_
+
+#include <algorithm>
+#include <memory>
+#include <queue>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/video_coding/timing/frame_delay_variation_kalman_filter.h"
+#include "modules/video_coding/timing/rtt_filter.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/numerics/moving_percentile_filter.h"
+#include "rtc_base/rolling_accumulator.h"
+
+namespace webrtc {
+
+class Clock;
+
+class JitterEstimator {
+ public:
+ // Configuration struct for statically overriding some constants and
+ // behaviour, configurable through field trials.
+ struct Config {
+ static constexpr char kFieldTrialsKey[] = "WebRTC-JitterEstimatorConfig";
+
+ // Parses a field trial string and validates the values.
+ static Config ParseAndValidate(absl::string_view field_trial);
+
+ std::unique_ptr<StructParametersParser> Parser() {
+ // clang-format off
+ return StructParametersParser::Create(
+ "avg_frame_size_median", &avg_frame_size_median,
+ "max_frame_size_percentile", &max_frame_size_percentile,
+ "frame_size_window", &frame_size_window,
+ "num_stddev_delay_clamp", &num_stddev_delay_clamp,
+ "num_stddev_delay_outlier", &num_stddev_delay_outlier,
+ "num_stddev_size_outlier", &num_stddev_size_outlier,
+ "congestion_rejection_factor", &congestion_rejection_factor,
+ "estimate_noise_when_congested", &estimate_noise_when_congested);
+ // clang-format on
+ }
+
+ bool MaxFrameSizePercentileEnabled() const {
+ return max_frame_size_percentile.has_value();
+ }
+
+ // If true, the "avg" frame size is calculated as the median over a window
+ // of recent frame sizes.
+ bool avg_frame_size_median = false;
+
+ // If set, the "max" frame size is calculated as this percentile over a
+ // window of recent frame sizes.
+ absl::optional<double> max_frame_size_percentile = absl::nullopt;
+
+ // The length of the percentile filters' window, in number of frames.
+ absl::optional<int> frame_size_window = absl::nullopt;
+
+ // The incoming frame delay variation samples are clamped to be at most
+ // this number of standard deviations away from zero.
+ //
+ // Increasing this value clamps fewer samples.
+ absl::optional<double> num_stddev_delay_clamp = absl::nullopt;
+
+ // A (relative) frame delay variation sample is an outlier if its absolute
+ // deviation from the Kalman filter model falls outside this number of
+ // sample standard deviations.
+ //
+ // Increasing this value rejects fewer samples.
+ absl::optional<double> num_stddev_delay_outlier = absl::nullopt;
+
+ // An (absolute) frame size sample is an outlier if its positive deviation
+ // from the estimated average frame size falls outside this number of sample
+ // standard deviations.
+ //
+ // Increasing this value rejects fewer samples.
+ absl::optional<double> num_stddev_size_outlier = absl::nullopt;
+
+ // A (relative) frame size variation sample is deemed "congested", and is
+ // thus rejected, if its value is less than this factor times the estimated
+ // max frame size.
+ //
+ // Decreasing this value rejects fewer samples.
+ absl::optional<double> congestion_rejection_factor = absl::nullopt;
+
+ // If true, the noise estimate will be updated for congestion rejected
+ // frames. This is currently enabled by default, but that may not be optimal
+ // since congested frames typically are not spread around the line with
+ // Gaussian noise. (This is the whole reason for the congestion rejection!)
+ bool estimate_noise_when_congested = true;
+ };
+
+ JitterEstimator(Clock* clock, const FieldTrialsView& field_trials);
+ JitterEstimator(const JitterEstimator&) = delete;
+ JitterEstimator& operator=(const JitterEstimator&) = delete;
+ ~JitterEstimator();
+
+ // Resets the estimate to the initial state.
+ void Reset();
+
+ // Updates the jitter estimate with the new data.
+ //
+ // Input:
+  //          - frame_delay : Delay-delta calculated by InterFrameDelay.
+ // - frame_size : Frame size of the current frame.
+ void UpdateEstimate(TimeDelta frame_delay, DataSize frame_size);
+
+ // Returns the current jitter estimate and adds an RTT dependent term in cases
+ // of retransmission.
+ // Input:
+ // - rtt_multiplier : RTT param multiplier (when applicable).
+ // - rtt_mult_add_cap : Multiplier cap from the RTTMultExperiment.
+ //
+ // Return value : Jitter estimate.
+ TimeDelta GetJitterEstimate(double rtt_multiplier,
+ absl::optional<TimeDelta> rtt_mult_add_cap);
+
+ // Updates the nack counter.
+ void FrameNacked();
+
+ // Updates the RTT filter.
+ //
+ // Input:
+ // - rtt : Round trip time.
+ void UpdateRtt(TimeDelta rtt);
+
+ // Returns the configuration. Only to be used by unit tests.
+ Config GetConfigForTest() const;
+
+ private:
+ // Updates the random jitter estimate, i.e. the variance of the time
+ // deviations from the line given by the Kalman filter.
+ //
+ // Input:
+  //          - d_dT : The deviation from the Kalman estimate.
+ void EstimateRandomJitter(double d_dT);
+
+ double NoiseThreshold() const;
+
+ // Calculates the current jitter estimate.
+ //
+ // Return value : The current jitter estimate.
+ TimeDelta CalculateEstimate();
+
+ // Post process the calculated estimate.
+ void PostProcessEstimate();
+
+ // Returns the estimated incoming frame rate.
+ Frequency GetFrameRate() const;
+
+ // Configuration that may override some internals.
+ const Config config_;
+
+ // Filters the {frame_delay_delta, frame_size_delta} measurements through
+ // a linear Kalman filter.
+ FrameDelayVariationKalmanFilter kalman_filter_;
+
+ // TODO(bugs.webrtc.org/14381): Update `avg_frame_size_bytes_` to DataSize
+ // when api/units have sufficient precision.
+  double avg_frame_size_bytes_; // Average frame size, in bytes.
+ double var_frame_size_bytes2_; // Frame size variance. Unit is bytes^2.
+  // Largest frame size received (decaying with a factor kPsi).
+ // Used by default.
+ // TODO(bugs.webrtc.org/14381): Update `max_frame_size_bytes_` to DataSize
+ // when api/units have sufficient precision.
+ double max_frame_size_bytes_;
+  // Percentile of recent frame sizes (over a window). Only used if configured.
+ MovingMedianFilter<int64_t> avg_frame_size_median_bytes_;
+ MovingPercentileFilter<int64_t> max_frame_size_bytes_percentile_;
+ // TODO(bugs.webrtc.org/14381): Update `startup_frame_size_sum_bytes_` to
+ // DataSize when api/units have sufficient precision.
+ double startup_frame_size_sum_bytes_;
+ size_t startup_frame_size_count_;
+
+ absl::optional<Timestamp> last_update_time_;
+ // The previously returned jitter estimate
+ absl::optional<TimeDelta> prev_estimate_;
+ // Frame size of the previous frame
+ absl::optional<DataSize> prev_frame_size_;
+ // Average of the random jitter. Unit is milliseconds.
+ double avg_noise_ms_;
+ // Variance of the time-deviation from the line. Unit is milliseconds^2.
+ double var_noise_ms2_;
+ size_t alpha_count_;
+ // The filtered sum of jitter estimates
+ TimeDelta filter_jitter_estimate_ = TimeDelta::Zero();
+
+ size_t startup_count_;
+ // Time when the latest nack was seen
+ Timestamp latest_nack_ = Timestamp::Zero();
+ // Keeps track of the number of nacks received, but never goes above
+ // kNackLimit.
+ size_t nack_count_;
+ RttFilter rtt_filter_;
+
+  // Tracks inter-frame intervals in microseconds, for frame rate estimation.
+ rtc::RollingAccumulator<uint64_t> fps_counter_;
+ Clock* clock_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_JITTER_ESTIMATOR_H_
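
As the unit tests below demonstrate, construction wires up a Clock and a
FieldTrialsView. A rough usage sketch follows; the field-trial value 0.95 and
the frame numbers are illustrative.

    #include <memory>

    #include "absl/types/optional.h"
    #include "api/field_trials.h"
    #include "api/units/data_size.h"
    #include "api/units/time_delta.h"
    #include "modules/video_coding/timing/jitter_estimator.h"
    #include "system_wrappers/include/clock.h"

    void JitterEstimatorUsageSketch() {
      webrtc::SimulatedClock clock(/*initial_time_us=*/0);
      // Optionally override the "max" frame size filter via the field trial.
      std::unique_ptr<webrtc::FieldTrials> trials =
          webrtc::FieldTrials::CreateNoGlobal(
              "WebRTC-JitterEstimatorConfig/max_frame_size_percentile:0.95/");
      webrtc::JitterEstimator estimator(&clock, *trials);

      // Feed one frame's delay variation and size, then read the estimate.
      estimator.UpdateEstimate(webrtc::TimeDelta::Millis(5),
                               webrtc::DataSize::Bytes(1200));
      const webrtc::TimeDelta jitter = estimator.GetJitterEstimate(
          /*rtt_multiplier=*/0.0, /*rtt_mult_add_cap=*/absl::nullopt);
      (void)jitter;
    }
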
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build
new file mode 100644
index 0000000000..a0d6154d78
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("jitter_estimator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc
new file mode 100644
index 0000000000..8e0c01587f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/jitter_estimator_unittest.cc
@@ -0,0 +1,305 @@
+/* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/jitter_estimator.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/field_trials.h"
+#include "api/units/data_size.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/numerics/histogram_percentile_counter.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Generates some simple test data in the form of a sawtooth wave.
+class ValueGenerator {
+ public:
+ explicit ValueGenerator(int32_t amplitude)
+ : amplitude_(amplitude), counter_(0) {}
+
+ virtual ~ValueGenerator() = default;
+
+ TimeDelta Delay() const {
+ return TimeDelta::Millis((counter_ % 11) - 5) * amplitude_;
+ }
+
+ DataSize FrameSize() const {
+ return DataSize::Bytes(1000 + Delay().ms() / 5);
+ }
+
+ void Advance() { ++counter_; }
+
+ private:
+ const int32_t amplitude_;
+ int64_t counter_;
+};
+
+class JitterEstimatorTest : public ::testing::Test {
+ protected:
+ explicit JitterEstimatorTest(const std::string& field_trials)
+ : fake_clock_(0),
+ field_trials_(FieldTrials::CreateNoGlobal(field_trials)),
+ estimator_(&fake_clock_, *field_trials_) {}
+ JitterEstimatorTest() : JitterEstimatorTest("") {}
+ virtual ~JitterEstimatorTest() {}
+
+ void Run(int duration_s, int framerate_fps, ValueGenerator& gen) {
+ TimeDelta tick = 1 / Frequency::Hertz(framerate_fps);
+ for (int i = 0; i < duration_s * framerate_fps; ++i) {
+ estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ fake_clock_.AdvanceTime(tick);
+ gen.Advance();
+ }
+ }
+
+ SimulatedClock fake_clock_;
+ std::unique_ptr<FieldTrials> field_trials_;
+ JitterEstimator estimator_;
+};
+
+TEST_F(JitterEstimatorTest, SteadyStateConvergence) {
+ ValueGenerator gen(10);
+ Run(/*duration_s=*/60, /*framerate_fps=*/30, gen);
+ EXPECT_EQ(estimator_.GetJitterEstimate(0, absl::nullopt).ms(), 54);
+}
+
+TEST_F(JitterEstimatorTest,
+ SizeOutlierIsNotRejectedAndIncreasesJitterEstimate) {
+ ValueGenerator gen(10);
+
+ // Steady state.
+ Run(/*duration_s=*/60, /*framerate_fps=*/30, gen);
+ TimeDelta steady_state_jitter =
+ estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // A single outlier frame size...
+ estimator_.UpdateEstimate(gen.Delay(), 10 * gen.FrameSize());
+ TimeDelta outlier_jitter = estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // ...changes the estimate.
+ EXPECT_GT(outlier_jitter.ms(), 1.25 * steady_state_jitter.ms());
+}
+
+TEST_F(JitterEstimatorTest, LowFramerateDisablesJitterEstimator) {
+ ValueGenerator gen(10);
+ // At 5 fps, we disable jitter delay altogether.
+ TimeDelta time_delta = 1 / Frequency::Hertz(5);
+ for (int i = 0; i < 60; ++i) {
+ estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ fake_clock_.AdvanceTime(time_delta);
+ if (i > 2)
+ EXPECT_EQ(estimator_.GetJitterEstimate(0, absl::nullopt),
+ TimeDelta::Zero());
+ gen.Advance();
+ }
+}
+
+TEST_F(JitterEstimatorTest, RttMultAddCap) {
+ std::vector<std::pair<TimeDelta, rtc::HistogramPercentileCounter>>
+ jitter_by_rtt_mult_cap;
+ jitter_by_rtt_mult_cap.emplace_back(
+ /*rtt_mult_add_cap=*/TimeDelta::Millis(10), /*long_tail_boundary=*/1000);
+ jitter_by_rtt_mult_cap.emplace_back(
+ /*rtt_mult_add_cap=*/TimeDelta::Millis(200), /*long_tail_boundary=*/1000);
+
+ for (auto& [rtt_mult_add_cap, jitter] : jitter_by_rtt_mult_cap) {
+ estimator_.Reset();
+
+ ValueGenerator gen(50);
+ TimeDelta time_delta = 1 / Frequency::Hertz(30);
+ constexpr TimeDelta kRtt = TimeDelta::Millis(250);
+ for (int i = 0; i < 100; ++i) {
+ estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ fake_clock_.AdvanceTime(time_delta);
+ estimator_.FrameNacked();
+ estimator_.UpdateRtt(kRtt);
+ jitter.Add(
+ estimator_.GetJitterEstimate(/*rtt_mult=*/1.0, rtt_mult_add_cap)
+ .ms());
+ gen.Advance();
+ }
+ }
+
+ // 200ms cap should result in at least 25% higher max compared to 10ms.
+ EXPECT_GT(*jitter_by_rtt_mult_cap[1].second.GetPercentile(1.0),
+ *jitter_by_rtt_mult_cap[0].second.GetPercentile(1.0) * 1.25);
+}
+
+// By default, the `JitterEstimator` is not robust against single large frames.
+TEST_F(JitterEstimatorTest, Single2xFrameSizeImpactsJitterEstimate) {
+ ValueGenerator gen(10);
+
+ // Steady state.
+ Run(/*duration_s=*/60, /*framerate_fps=*/30, gen);
+ TimeDelta steady_state_jitter =
+ estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // A single outlier frame size...
+ estimator_.UpdateEstimate(gen.Delay(), 2 * gen.FrameSize());
+ TimeDelta outlier_jitter = estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // ...impacts the estimate.
+ EXPECT_GT(outlier_jitter.ms(), steady_state_jitter.ms());
+}
+
+// Under the default config, congested frames are used when calculating the
+// noise variance, meaning that they will impact the final jitter estimate.
+TEST_F(JitterEstimatorTest, CongestedFrameImpactsJitterEstimate) {
+ ValueGenerator gen(10);
+
+ // Steady state.
+ Run(/*duration_s=*/10, /*framerate_fps=*/30, gen);
+ TimeDelta steady_state_jitter =
+ estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // Congested frame...
+ estimator_.UpdateEstimate(-10 * gen.Delay(), 0.1 * gen.FrameSize());
+ TimeDelta outlier_jitter = estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // ...impacts the estimate.
+ EXPECT_GT(outlier_jitter.ms(), steady_state_jitter.ms());
+}
+
+TEST_F(JitterEstimatorTest, EmptyFieldTrialsParsesToUnsetConfig) {
+ JitterEstimator::Config config = estimator_.GetConfigForTest();
+ EXPECT_FALSE(config.avg_frame_size_median);
+ EXPECT_FALSE(config.max_frame_size_percentile.has_value());
+ EXPECT_FALSE(config.frame_size_window.has_value());
+ EXPECT_FALSE(config.num_stddev_delay_clamp.has_value());
+ EXPECT_FALSE(config.num_stddev_delay_outlier.has_value());
+ EXPECT_FALSE(config.num_stddev_size_outlier.has_value());
+ EXPECT_FALSE(config.congestion_rejection_factor.has_value());
+ EXPECT_TRUE(config.estimate_noise_when_congested);
+}
+
+class FieldTrialsOverriddenJitterEstimatorTest : public JitterEstimatorTest {
+ protected:
+ FieldTrialsOverriddenJitterEstimatorTest()
+ : JitterEstimatorTest(
+ "WebRTC-JitterEstimatorConfig/"
+ "avg_frame_size_median:true,"
+ "max_frame_size_percentile:0.9,"
+ "frame_size_window:30,"
+ "num_stddev_delay_clamp:1.1,"
+ "num_stddev_delay_outlier:2,"
+ "num_stddev_size_outlier:3.1,"
+ "congestion_rejection_factor:-1.55,"
+ "estimate_noise_when_congested:false/") {}
+ ~FieldTrialsOverriddenJitterEstimatorTest() {}
+};
+
+TEST_F(FieldTrialsOverriddenJitterEstimatorTest, FieldTrialsParsesCorrectly) {
+ JitterEstimator::Config config = estimator_.GetConfigForTest();
+ EXPECT_TRUE(config.avg_frame_size_median);
+ EXPECT_EQ(*config.max_frame_size_percentile, 0.9);
+ EXPECT_EQ(*config.frame_size_window, 30);
+ EXPECT_EQ(*config.num_stddev_delay_clamp, 1.1);
+ EXPECT_EQ(*config.num_stddev_delay_outlier, 2.0);
+ EXPECT_EQ(*config.num_stddev_size_outlier, 3.1);
+ EXPECT_EQ(*config.congestion_rejection_factor, -1.55);
+ EXPECT_FALSE(config.estimate_noise_when_congested);
+}
+
+TEST_F(FieldTrialsOverriddenJitterEstimatorTest,
+ DelayOutlierIsRejectedAndMaintainsJitterEstimate) {
+ ValueGenerator gen(10);
+
+ // Steady state.
+ Run(/*duration_s=*/60, /*framerate_fps=*/30, gen);
+ TimeDelta steady_state_jitter =
+ estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // A single outlier frame size...
+ estimator_.UpdateEstimate(10 * gen.Delay(), gen.FrameSize());
+ TimeDelta outlier_jitter = estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // ...does not change the estimate.
+ EXPECT_EQ(outlier_jitter.ms(), steady_state_jitter.ms());
+}
+
+// The field trial is configured to be robust against the `(1 - 0.9) = 10%`
+// largest frames over a window of length `30`.
+TEST_F(FieldTrialsOverriddenJitterEstimatorTest,
+ Four2xFrameSizesImpactJitterEstimate) {
+ ValueGenerator gen(10);
+
+ // Steady state.
+ Run(/*duration_s=*/60, /*framerate_fps=*/30, gen);
+ TimeDelta steady_state_jitter =
+ estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // Three outlier frames do not impact the jitter estimate.
+ for (int i = 0; i < 3; ++i) {
+ estimator_.UpdateEstimate(gen.Delay(), 2 * gen.FrameSize());
+ }
+ TimeDelta outlier_jitter_3x = estimator_.GetJitterEstimate(0, absl::nullopt);
+ EXPECT_EQ(outlier_jitter_3x.ms(), steady_state_jitter.ms());
+
+ // Four outlier frames do impact the jitter estimate.
+ estimator_.UpdateEstimate(gen.Delay(), 2 * gen.FrameSize());
+ TimeDelta outlier_jitter_4x = estimator_.GetJitterEstimate(0, absl::nullopt);
+ EXPECT_GT(outlier_jitter_4x.ms(), outlier_jitter_3x.ms());
+}
+
+// When so configured, congested frames are NOT used when calculating the
+// noise variance, meaning that they will NOT impact the final jitter estimate.
+TEST_F(FieldTrialsOverriddenJitterEstimatorTest,
+ CongestedFrameDoesNotImpactJitterEstimate) {
+ ValueGenerator gen(10);
+
+ // Steady state.
+ Run(/*duration_s=*/10, /*framerate_fps=*/30, gen);
+ TimeDelta steady_state_jitter =
+ estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // Congested frame...
+ estimator_.UpdateEstimate(-10 * gen.Delay(), 0.1 * gen.FrameSize());
+ TimeDelta outlier_jitter = estimator_.GetJitterEstimate(0, absl::nullopt);
+
+ // ...does not impact the estimate.
+ EXPECT_EQ(outlier_jitter.ms(), steady_state_jitter.ms());
+}
+
+class MisconfiguredFieldTrialsJitterEstimatorTest : public JitterEstimatorTest {
+ protected:
+ MisconfiguredFieldTrialsJitterEstimatorTest()
+ : JitterEstimatorTest(
+ "WebRTC-JitterEstimatorConfig/"
+ "max_frame_size_percentile:-0.9,"
+ "frame_size_window:-1,"
+ "num_stddev_delay_clamp:-1.9,"
+ "num_stddev_delay_outlier:-2,"
+ "num_stddev_size_outlier:-23.1/") {}
+ ~MisconfiguredFieldTrialsJitterEstimatorTest() {}
+};
+
+TEST_F(MisconfiguredFieldTrialsJitterEstimatorTest, FieldTrialsAreValidated) {
+ JitterEstimator::Config config = estimator_.GetConfigForTest();
+ EXPECT_EQ(*config.max_frame_size_percentile, 0.0);
+ EXPECT_EQ(*config.frame_size_window, 1);
+ EXPECT_EQ(*config.num_stddev_delay_clamp, 0.0);
+ EXPECT_EQ(*config.num_stddev_delay_outlier, 0.0);
+ EXPECT_EQ(*config.num_stddev_size_outlier, 0.0);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc
new file mode 100644
index 0000000000..6962224d61
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/rtt_filter.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/inlined_vector.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr TimeDelta kMaxRtt = TimeDelta::Seconds(3);
+constexpr uint32_t kFilterFactorMax = 35;
+constexpr double kJumpStddev = 2.5;
+constexpr double kDriftStdDev = 3.5;
+
+} // namespace
+
+RttFilter::RttFilter()
+ : avg_rtt_(TimeDelta::Zero()),
+ var_rtt_(0),
+ max_rtt_(TimeDelta::Zero()),
+ jump_buf_(kMaxDriftJumpCount, TimeDelta::Zero()),
+ drift_buf_(kMaxDriftJumpCount, TimeDelta::Zero()) {
+ Reset();
+}
+
+void RttFilter::Reset() {
+ got_non_zero_update_ = false;
+ avg_rtt_ = TimeDelta::Zero();
+ var_rtt_ = 0;
+ max_rtt_ = TimeDelta::Zero();
+ filt_fact_count_ = 1;
+ absl::c_fill(jump_buf_, TimeDelta::Zero());
+ absl::c_fill(drift_buf_, TimeDelta::Zero());
+}
+
+void RttFilter::Update(TimeDelta rtt) {
+ if (!got_non_zero_update_) {
+ if (rtt.IsZero()) {
+ return;
+ }
+ got_non_zero_update_ = true;
+ }
+
+ // Sanity check
+ if (rtt > kMaxRtt) {
+ rtt = kMaxRtt;
+ }
+
+ double filt_factor = 0;
+ if (filt_fact_count_ > 1) {
+ filt_factor = static_cast<double>(filt_fact_count_ - 1) / filt_fact_count_;
+ }
+ filt_fact_count_++;
+ if (filt_fact_count_ > kFilterFactorMax) {
+ // This prevents filt_factor from going above
+ // (kFilterFactorMax - 1) / kFilterFactorMax,
+ // e.g., kFilterFactorMax = 35 => filt_factor = 34/35 ~= 0.97.
+ filt_fact_count_ = kFilterFactorMax;
+ }
+ TimeDelta old_avg = avg_rtt_;
+ int64_t old_var = var_rtt_;
+ avg_rtt_ = filt_factor * avg_rtt_ + (1 - filt_factor) * rtt;
+ int64_t delta_ms = (rtt - avg_rtt_).ms();
+ var_rtt_ = filt_factor * var_rtt_ + (1 - filt_factor) * (delta_ms * delta_ms);
+ max_rtt_ = std::max(rtt, max_rtt_);
+ if (!JumpDetection(rtt) || !DriftDetection(rtt)) {
+ // In some cases we don't want to update the statistics
+ avg_rtt_ = old_avg;
+ var_rtt_ = old_var;
+ }
+}
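+
+// Note on the recursion above: while filt_fact_count_ is still growing, the
+// update avg = ((n - 1) / n) * avg + (1 / n) * rtt is a plain running mean.
+// Once the count saturates at kFilterFactorMax = 35 it becomes an
+// exponentially weighted average in which roughly 1/35 ~= 3% of each new
+// sample enters the long-term statistics.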
+
+bool RttFilter::JumpDetection(TimeDelta rtt) {
+ TimeDelta diff_from_avg = avg_rtt_ - rtt;
+ // Unit of var_rtt_ is ms^2.
+ TimeDelta jump_threshold = TimeDelta::Millis(kJumpStddev * sqrt(var_rtt_));
+ if (diff_from_avg.Abs() > jump_threshold) {
+ bool positive_diff = diff_from_avg >= TimeDelta::Zero();
+ if (!jump_buf_.empty() && positive_diff != last_jump_positive_) {
+ // Since the signs differ, the samples currently in the buffer are
+ // useless, as they represent a jump in a different direction.
+ jump_buf_.clear();
+ }
+ if (jump_buf_.size() < kMaxDriftJumpCount) {
+ // Update the buffer used for the short time statistics.
+ // The sign of the diff is used for updating the counter since
+ // we want to use the same buffer for keeping track of when
+ // the RTT jumps down and up.
+ jump_buf_.push_back(rtt);
+ last_jump_positive_ = positive_diff;
+ }
+ if (jump_buf_.size() >= kMaxDriftJumpCount) {
+ // Detected an RTT jump
+ ShortRttFilter(jump_buf_);
+ filt_fact_count_ = kMaxDriftJumpCount + 1;
+ jump_buf_.clear();
+ } else {
+ return false;
+ }
+ } else {
+ jump_buf_.clear();
+ }
+ return true;
+}
+
+bool RttFilter::DriftDetection(TimeDelta rtt) {
+ // Unit of sqrt of var_rtt_ is ms.
+ TimeDelta drift_threshold = TimeDelta::Millis(kDriftStdDev * sqrt(var_rtt_));
+ if (max_rtt_ - avg_rtt_ > drift_threshold) {
+ if (drift_buf_.size() < kMaxDriftJumpCount) {
+ // Update the buffer used for the short time statistics.
+ drift_buf_.push_back(rtt);
+ }
+ if (drift_buf_.size() >= kMaxDriftJumpCount) {
+ // Detected an RTT drift
+ ShortRttFilter(drift_buf_);
+ filt_fact_count_ = kMaxDriftJumpCount + 1;
+ drift_buf_.clear();
+ }
+ } else {
+ drift_buf_.clear();
+ }
+ return true;
+}
+
+void RttFilter::ShortRttFilter(const BufferList& buf) {
+ RTC_DCHECK_EQ(buf.size(), kMaxDriftJumpCount);
+ max_rtt_ = TimeDelta::Zero();
+ avg_rtt_ = TimeDelta::Zero();
+ for (const TimeDelta& rtt : buf) {
+ if (rtt > max_rtt_) {
+ max_rtt_ = rtt;
+ }
+ avg_rtt_ += rtt;
+ }
+ avg_rtt_ = avg_rtt_ / static_cast<double>(buf.size());
+}
+
+TimeDelta RttFilter::Rtt() const {
+ return max_rtt_;
+}
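+
+// Note that Rtt() reports max_rtt_, the maximum seen since the last reset or
+// ShortRttFilter re-estimate, rather than the smoothed avg_rtt_; this keeps
+// the reported RTT conservative. A minimal usage sketch (illustrative only,
+// with made-up sample values):
+//
+//   RttFilter filter;
+//   filter.Update(TimeDelta::Millis(120));  // Feed RTT samples, e.g. from
+//   filter.Update(TimeDelta::Millis(140));  // periodic RTCP reports.
+//   TimeDelta rtt = filter.Rtt();           // Max-based, conservative RTT.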
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h
new file mode 100644
index 0000000000..b8700b23ee
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_RTT_FILTER_H_
+#define MODULES_VIDEO_CODING_TIMING_RTT_FILTER_H_
+
+#include <stdint.h>
+
+#include "absl/container/inlined_vector.h"
+#include "api/units/time_delta.h"
+
+namespace webrtc {
+
+class RttFilter {
+ public:
+ RttFilter();
+ RttFilter(const RttFilter&) = delete;
+ RttFilter& operator=(const RttFilter&) = delete;
+
+ // Resets the filter.
+ void Reset();
+ // Updates the filter with a new sample.
+ void Update(TimeDelta rtt);
+ // A getter function for the current RTT level.
+ TimeDelta Rtt() const;
+
+ private:
+ // The size of the drift and jump memory buffers, and thus also the
+ // detection threshold for these detectors, in number of samples.
+ static constexpr int kMaxDriftJumpCount = 5;
+ using BufferList = absl::InlinedVector<TimeDelta, kMaxDriftJumpCount>;
+
+ // Detects RTT jumps by comparing the difference between
+ // samples and average to the standard deviation.
+ // Returns true if the long-term statistics should be updated,
+ // and false otherwise.
+ bool JumpDetection(TimeDelta rtt);
+
+ // Detects RTT drifts by comparing the difference between
+ // max and average to the standard deviation.
+ // Returns true if the long-term statistics should be updated,
+ // and false otherwise.
+ bool DriftDetection(TimeDelta rtt);
+
+ // Computes the short-term average and maximum of the buffer `buf`.
+ void ShortRttFilter(const BufferList& buf);
+
+ bool got_non_zero_update_;
+ TimeDelta avg_rtt_;
+ // Variance units are TimeDelta^2. Store as ms^2.
+ int64_t var_rtt_;
+ TimeDelta max_rtt_;
+ uint32_t filt_fact_count_;
+ bool last_jump_positive_ = false;
+ BufferList jump_buf_;
+ BufferList drift_buf_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_RTT_FILTER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build
new file mode 100644
index 0000000000..488a49d9c2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/rtt_filter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtt_filter_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc
new file mode 100644
index 0000000000..05502e6f5b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/rtt_filter_unittest.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/rtt_filter.h"
+
+#include "api/units/time_delta.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RttFilterTest, RttIsCapped) {
+ RttFilter rtt_filter;
+ rtt_filter.Update(TimeDelta::Seconds(500));
+
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Seconds(3));
+}
+
+// If a sample differs from the running average by more than 2.5 standard
+// deviations, it is considered a jump. After 5 data points at the new level,
+// the RTT estimate is reset to that level.
+TEST(RttFilterTest, PositiveJumpDetection) {
+ RttFilter rtt_filter;
+
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+
+ // Trigger jump samples at the new level; the fifth sample, after the first
+ // EXPECT below, completes the jump detection.
+ rtt_filter.Update(TimeDelta::Millis(1400));
+ rtt_filter.Update(TimeDelta::Millis(1500));
+ rtt_filter.Update(TimeDelta::Millis(1600));
+ rtt_filter.Update(TimeDelta::Millis(1600));
+
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(1600));
+
+ rtt_filter.Update(TimeDelta::Millis(1600));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(1600));
+}
+
+TEST(RttFilterTest, NegativeJumpDetection) {
+ RttFilter rtt_filter;
+
+ for (int i = 0; i < 10; ++i)
+ rtt_filter.Update(TimeDelta::Millis(1500));
+
+ // Trigger 5 negative data points that jump rtt down.
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ // Before 5 data points at the new level, max RTT is still 1500.
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(1500));
+
+ rtt_filter.Update(TimeDelta::Millis(300));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(300));
+}
+
+TEST(RttFilterTest, JumpsResetByDirectionShift) {
+ RttFilter rtt_filter;
+ for (int i = 0; i < 10; ++i)
+ rtt_filter.Update(TimeDelta::Millis(1500));
+
+ // Trigger 4 negative jumps, then a positive one. This resets the jump
+ // detection.
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(200));
+ rtt_filter.Update(TimeDelta::Millis(2000));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(2000));
+
+ rtt_filter.Update(TimeDelta::Millis(300));
+ EXPECT_EQ(rtt_filter.Rtt(), TimeDelta::Millis(2000));
+}
+
+// If the difference between the max and the average is more than 3.5 standard
+// deviations, a drift is detected and a short filter is applied to find a new
+// max RTT.
+TEST(RttFilterTest, DriftDetection) {
+ RttFilter rtt_filter;
+
+ // Descend the RTT in 30 ms steps and settle at 700 ms. A drift is detected
+ // after the 700 ms RTT has been reported around 50 times for these targets.
+ constexpr TimeDelta kStartRtt = TimeDelta::Millis(1000);
+ constexpr TimeDelta kDriftTarget = TimeDelta::Millis(700);
+ constexpr TimeDelta kDelta = TimeDelta::Millis(30);
+ for (TimeDelta rtt = kStartRtt; rtt >= kDriftTarget; rtt -= kDelta)
+ rtt_filter.Update(rtt);
+
+ EXPECT_EQ(rtt_filter.Rtt(), kStartRtt);
+
+ for (int i = 0; i < 50; ++i)
+ rtt_filter.Update(kDriftTarget);
+ EXPECT_EQ(rtt_filter.Rtt(), kDriftTarget);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.cc b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.cc
new file mode 100644
index 0000000000..dc62ac674a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/timestamp_extrapolator.h"
+
+#include <algorithm>
+
+#include "absl/types/optional.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr double kLambda = 1;
+constexpr uint32_t kStartUpFilterDelayInPackets = 2;
+constexpr double kAlarmThreshold = 60e3;
+// In timestamp ticks at 90 kHz, i.e. ~73 ms.
+constexpr double kAccDrift = 6600;
+constexpr double kAccMaxError = 7000;
+constexpr double kP11 = 1e10;
+
+} // namespace
+
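+// The extrapolator fits the linear model
+//   unwrapped_ts90khz - first_unwrapped_timestamp_ ~= w_[0] * t_ms + w_[1]
+// using recursive least squares with forgetting factor kLambda (1 here, i.e.
+// no forgetting). w_[0] is the RTP tick rate in ticks per millisecond
+// (nominally 90) and w_[1] is the offset; p_ is the parameter covariance,
+// seeded with a large offset uncertainty kP11 so the offset adapts quickly.
+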
+TimestampExtrapolator::TimestampExtrapolator(Timestamp start)
+ : start_(Timestamp::Zero()),
+ prev_(Timestamp::Zero()),
+ packet_count_(0),
+ detector_accumulator_pos_(0),
+ detector_accumulator_neg_(0) {
+ Reset(start);
+}
+
+void TimestampExtrapolator::Reset(Timestamp start) {
+ start_ = start;
+ prev_ = start_;
+ first_unwrapped_timestamp_ = absl::nullopt;
+ w_[0] = 90.0;
+ w_[1] = 0;
+ p_[0][0] = 1;
+ p_[1][1] = kP11;
+ p_[0][1] = p_[1][0] = 0;
+ unwrapper_ = RtpTimestampUnwrapper();
+ packet_count_ = 0;
+ detector_accumulator_pos_ = 0;
+ detector_accumulator_neg_ = 0;
+}
+
+void TimestampExtrapolator::Update(Timestamp now, uint32_t ts90khz) {
+ if (now - prev_ > TimeDelta::Seconds(10)) {
+ // Ten seconds without a complete frame: reset the extrapolator.
+ Reset(now);
+ } else {
+ prev_ = now;
+ }
+
+ // Remove offset to prevent badly scaled matrices
+ const TimeDelta offset = now - start_;
+ double t_ms = offset.ms();
+
+ int64_t unwrapped_ts90khz = unwrapper_.Unwrap(ts90khz);
+
+ if (!first_unwrapped_timestamp_) {
+ // Make an initial guess of the offset, which should be almost correct
+ // since t_ms (the time since start_) should be about zero at this point.
+ w_[1] = -w_[0] * t_ms;
+ first_unwrapped_timestamp_ = unwrapped_ts90khz;
+ }
+
+ double residual =
+ (static_cast<double>(unwrapped_ts90khz) - *first_unwrapped_timestamp_) -
+ t_ms * w_[0] - w_[1];
+ if (DelayChangeDetection(residual) &&
+ packet_count_ >= kStartUpFilterDelayInPackets) {
+ // A sudden change of average network delay has been detected.
+ // Force the filter to adjust its offset parameter by changing
+ // the offset uncertainty. Don't do this during startup.
+ p_[1][1] = kP11;
+ }
+
+ if (prev_unwrapped_timestamp_ &&
+ unwrapped_ts90khz < prev_unwrapped_timestamp_) {
+ // Drop reordered frames.
+ return;
+ }
+
+ // T = [t(k) 1]';
+ // that = T'*w;
+ // K = P*T/(lambda + T'*P*T);
+ double K[2];
+ K[0] = p_[0][0] * t_ms + p_[0][1];
+ K[1] = p_[1][0] * t_ms + p_[1][1];
+ double TPT = kLambda + t_ms * K[0] + K[1];
+ K[0] /= TPT;
+ K[1] /= TPT;
+ // w = w + K*(ts(k) - that);
+ w_[0] = w_[0] + K[0] * residual;
+ w_[1] = w_[1] + K[1] * residual;
+ // P = 1/lambda*(P - K*T'*P);
+ double p00 =
+ 1 / kLambda * (p_[0][0] - (K[0] * t_ms * p_[0][0] + K[0] * p_[1][0]));
+ double p01 =
+ 1 / kLambda * (p_[0][1] - (K[0] * t_ms * p_[0][1] + K[0] * p_[1][1]));
+ p_[1][0] =
+ 1 / kLambda * (p_[1][0] - (K[1] * t_ms * p_[0][0] + K[1] * p_[1][0]));
+ p_[1][1] =
+ 1 / kLambda * (p_[1][1] - (K[1] * t_ms * p_[0][1] + K[1] * p_[1][1]));
+ p_[0][0] = p00;
+ p_[0][1] = p01;
+ prev_unwrapped_timestamp_ = unwrapped_ts90khz;
+ if (packet_count_ < kStartUpFilterDelayInPackets) {
+ packet_count_++;
+ }
+}
+
+absl::optional<Timestamp> TimestampExtrapolator::ExtrapolateLocalTime(
+ uint32_t timestamp90khz) const {
+ int64_t unwrapped_ts90khz = unwrapper_.PeekUnwrap(timestamp90khz);
+ RTC_DCHECK_GE(unwrapped_ts90khz, 0);
+
+ if (!first_unwrapped_timestamp_) {
+ return absl::nullopt;
+ } else if (packet_count_ < kStartUpFilterDelayInPackets) {
+ constexpr double kRtpTicksPerMs = 90;
+ TimeDelta diff = TimeDelta::Millis(
+ (unwrapped_ts90khz - *prev_unwrapped_timestamp_) / kRtpTicksPerMs);
+ if (diff.ms() < 0) {
+ RTC_DCHECK_GE(prev_.ms(), -diff.ms());
+ }
+ return prev_ + diff;
+ } else if (w_[0] < 1e-3) {
+ return start_;
+ } else {
+ double timestampDiff = unwrapped_ts90khz - *first_unwrapped_timestamp_;
+ auto diff_ms = static_cast<int64_t>((timestampDiff - w_[1]) / w_[0] + 0.5);
+ if (diff_ms < 0) {
+ RTC_DCHECK_GE(start_.ms(), -diff_ms);
+ }
+ return start_ + TimeDelta::Millis(diff_ms);
+ }
+}
+
+bool TimestampExtrapolator::DelayChangeDetection(double error) {
+ // CUSUM detection of sudden delay changes
+ error = (error > 0) ? std::min(error, kAccMaxError)
+ : std::max(error, -kAccMaxError);
+ detector_accumulator_pos_ =
+ std::max(detector_accumulator_pos_ + error - kAccDrift, double{0});
+ detector_accumulator_neg_ =
+ std::min(detector_accumulator_neg_ + error + kAccDrift, double{0});
+ if (detector_accumulator_pos_ > kAlarmThreshold ||
+ detector_accumulator_neg_ < -kAlarmThreshold) {
+ // Alarm
+ detector_accumulator_pos_ = detector_accumulator_neg_ = 0;
+ return true;
+ }
+ return false;
+}
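+
+// Worked example of the CUSUM detector: with kAccDrift = 6600 ticks and
+// kAlarmThreshold = 60e3 ticks, a sustained residual of 9600 ticks per update
+// exceeds the drift allowance by 3000 ticks each time, so the positive
+// accumulator trips the alarm after 60000 / 3000 = 20 consecutive updates.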
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.h b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.h
new file mode 100644
index 0000000000..6a9763943e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_TIMESTAMP_EXTRAPOLATOR_H_
+#define MODULES_VIDEO_CODING_TIMING_TIMESTAMP_EXTRAPOLATOR_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+// Not thread safe.
+class TimestampExtrapolator {
+ public:
+ explicit TimestampExtrapolator(Timestamp start);
+ void Update(Timestamp now, uint32_t ts90khz);
+ absl::optional<Timestamp> ExtrapolateLocalTime(uint32_t timestamp90khz) const;
+ void Reset(Timestamp start);
+
+ private:
+ bool DelayChangeDetection(double error);
+
+ double w_[2];
+ double p_[2][2];
+ Timestamp start_;
+ Timestamp prev_;
+ absl::optional<int64_t> first_unwrapped_timestamp_;
+ RtpTimestampUnwrapper unwrapper_;
+ absl::optional<int64_t> prev_unwrapped_timestamp_;
+ uint32_t packet_count_;
+ double detector_accumulator_pos_;
+ double detector_accumulator_neg_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_TIMESTAMP_EXTRAPOLATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_gn/moz.build
new file mode 100644
index 0000000000..4cd085dd09
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("timestamp_extrapolator_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_unittest.cc
new file mode 100644
index 0000000000..0b5fd74a8e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timestamp_extrapolator_unittest.cc
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/timestamp_extrapolator.h"
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "absl/types/optional.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::Eq;
+using ::testing::Optional;
+
+namespace {
+
+constexpr Frequency kRtpHz = Frequency::KiloHertz(90);
+constexpr Frequency k25Fps = Frequency::Hertz(25);
+constexpr TimeDelta k25FpsDelay = 1 / k25Fps;
+
+} // namespace
+
+TEST(TimestampExtrapolatorTest, ExtrapolationOccursAfter2Packets) {
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+
+ // No packets so no timestamp.
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(90000), Eq(absl::nullopt));
+
+ uint32_t rtp = 90000;
+ clock.AdvanceTime(k25FpsDelay);
+ // The first result is somewhat arbitrary since it is based on the "start"
+ // time given to the constructor.
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+
+ rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp + 90000),
+ Optional(clock.CurrentTime() + TimeDelta::Seconds(1)));
+}
+
+TEST(TimestampExtrapolatorTest, ResetsAfter10SecondPause) {
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+
+ uint32_t rtp = 90000;
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+
+ rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+
+ rtp += 10 * kRtpHz.hertz();
+ clock.AdvanceTime(TimeDelta::Seconds(10) + TimeDelta::Micros(1));
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+}
+
+TEST(TimestampExtrapolatorTest, TimestampExtrapolatesMultipleRtpWrapArounds) {
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+
+ uint32_t rtp = std::numeric_limits<uint32_t>::max();
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+
+ // One overflow. Static cast to avoid undefined behaviour with +=.
+ rtp += static_cast<uint32_t>(kRtpHz / k25Fps);
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+
+ // Assert that extrapolation works across the boundary as expected.
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp + 90000),
+ Optional(clock.CurrentTime() + TimeDelta::Seconds(1)));
+ // This is not quite 1s since the math always rounds up.
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp - 90000),
+ Optional(clock.CurrentTime() - TimeDelta::Millis(999)));
+
+ // To avoid the 10 s inactivity reset while the timestamp wraps, add a
+ // packet every 10 s until we overflow twice.
+ constexpr TimeDelta kRtpOverflowDelay =
+ std::numeric_limits<uint32_t>::max() / kRtpHz;
+ const Timestamp overflow_time = clock.CurrentTime() + kRtpOverflowDelay * 2;
+
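+ // One full 32-bit RTP wrap at 90 kHz takes 2^32 / 90000 s, roughly 13.25
+ // hours, so two wraps correspond to about 26.5 hours of simulated time.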
+ while (clock.CurrentTime() < overflow_time) {
+ clock.AdvanceTime(TimeDelta::Seconds(10));
+ // Static-cast before += to avoid undefined behaviour of overflow.
+ rtp += static_cast<uint32_t>(kRtpHz * TimeDelta::Seconds(10));
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+ }
+}
+
+TEST(TimestampExtrapolatorTest, NegativeRtpTimestampWrapAround) {
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+ uint32_t rtp = 0;
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+ // Go backwards!
+ rtp -= kRtpHz.hertz();
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime() - TimeDelta::Seconds(1)));
+}
+
+TEST(TimestampExtrapolatorTest, Slow90KHzClock) {
+ // This simulates a slow camera, which produces frames at 24Hz instead of
+ // 25Hz. The extrapolator should be able to resolve this with enough data.
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+
+ constexpr TimeDelta k24FpsDelay = 1 / Frequency::Hertz(24);
+ uint32_t rtp = 90000;
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+
+ // The slow camera increments RTP at the 25 fps rate even though it's
+ // producing at 24 fps. After 25 frames the extrapolator should settle at
+ // this rate.
+ for (int i = 0; i < 25; ++i) {
+ rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k24FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ }
+
+ // The camera would normally produce 25 frames in 90K ticks, but is slow
+ // so takes 1s + k24FpsDelay for 90K ticks.
+ constexpr Frequency kSlowRtpHz = 90000 / (25 * k24FpsDelay);
+ // The extrapolator will be predicting that time at millisecond precision.
+ auto ts = ts_extrapolator.ExtrapolateLocalTime(rtp + kSlowRtpHz.hertz());
+ ASSERT_TRUE(ts.has_value());
+ EXPECT_EQ(ts->ms(), clock.TimeInMilliseconds() + 1000);
+}
+
+TEST(TimestampExtrapolatorTest, Fast90KHzClock) {
+ // This simulates a fast camera, which produces frames at 26Hz instead of
+ // 25Hz. The extrapolator should be able to resolve this with enough data.
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+
+ constexpr TimeDelta k26FpsDelay = 1 / Frequency::Hertz(26);
+ uint32_t rtp = 90000;
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+
+ // The fast camera increments RTP at the 25 fps rate even though it's
+ // producing at 26 fps. After 25 frames the extrapolator should settle at
+ // this rate.
+ for (int i = 0; i < 25; ++i) {
+ rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k26FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ }
+
+ // The camera would normally produce 25 frames in 90K ticks, but is fast
+ // so takes only 25 * k26FpsDelay (~0.96 s) for 90K ticks.
+ constexpr Frequency kFastRtpHz = 90000 / (25 * k26FpsDelay);
+ // The extrapolator will be predicting that time at millisecond precision.
+ auto ts = ts_extrapolator.ExtrapolateLocalTime(rtp + kFastRtpHz.hertz());
+ ASSERT_TRUE(ts.has_value());
+ EXPECT_EQ(ts->ms(), clock.TimeInMilliseconds() + 1000);
+}
+
+TEST(TimestampExtrapolatorTest, TimestampJump) {
+ // This simulates a jump in RTP timestamp, which could occur if a camera was
+ // swapped for example.
+ SimulatedClock clock(Timestamp::Millis(1337));
+ TimestampExtrapolator ts_extrapolator(clock.CurrentTime());
+
+ uint32_t rtp = 90000;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp),
+ Optional(clock.CurrentTime()));
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(rtp + 90000),
+ Optional(clock.CurrentTime() + TimeDelta::Seconds(1)));
+
+ // Jump RTP.
+ uint32_t new_rtp = 1337 * 90000;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), new_rtp);
+ new_rtp += kRtpHz / k25Fps;
+ clock.AdvanceTime(k25FpsDelay);
+ ts_extrapolator.Update(clock.CurrentTime(), new_rtp);
+ EXPECT_THAT(ts_extrapolator.ExtrapolateLocalTime(new_rtp),
+ Optional(clock.CurrentTime()));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing.cc b/third_party/libwebrtc/modules/video_coding/timing/timing.cc
new file mode 100644
index 0000000000..0b61d5a35e
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/timing.h"
+
+#include <algorithm>
+
+#include "api/units/time_delta.h"
+#include "modules/video_coding/timing/timestamp_extrapolator.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+
+// Default pacing that is used for the low-latency renderer path.
+constexpr TimeDelta kZeroPlayoutDelayDefaultMinPacing = TimeDelta::Millis(8);
+constexpr TimeDelta kLowLatencyStreamMaxPlayoutDelayThreshold =
+ TimeDelta::Millis(500);
+
+void CheckDelaysValid(TimeDelta min_delay, TimeDelta max_delay) {
+ if (min_delay > max_delay) {
+ RTC_LOG(LS_ERROR)
+ << "Playout delays set incorrectly: min playout delay (" << min_delay
+ << ") > max playout delay (" << max_delay
+ << "). This is undefined behaviour. Application writers should "
+ "ensure that the min delay is always less than or equals max "
+ "delay. If trying to use the playout delay header extensions "
+ "described in "
+ "https://webrtc.googlesource.com/src/+/refs/heads/main/docs/"
+ "native-code/rtp-hdrext/playout-delay/, be careful that a playout "
+ "delay hint or A/V sync settings may have caused this conflict.";
+ }
+}
+
+} // namespace
+
+VCMTiming::VCMTiming(Clock* clock, const FieldTrialsView& field_trials)
+ : clock_(clock),
+ ts_extrapolator_(
+ std::make_unique<TimestampExtrapolator>(clock_->CurrentTime())),
+ codec_timer_(std::make_unique<CodecTimer>()),
+ render_delay_(kDefaultRenderDelay),
+ min_playout_delay_(TimeDelta::Zero()),
+ max_playout_delay_(TimeDelta::Seconds(10)),
+ jitter_delay_(TimeDelta::Zero()),
+ current_delay_(TimeDelta::Zero()),
+ prev_frame_timestamp_(0),
+ num_decoded_frames_(0),
+ zero_playout_delay_min_pacing_("min_pacing",
+ kZeroPlayoutDelayDefaultMinPacing),
+ last_decode_scheduled_(Timestamp::Zero()) {
+ ParseFieldTrial({&zero_playout_delay_min_pacing_},
+ field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
+}
+
+void VCMTiming::Reset() {
+ MutexLock lock(&mutex_);
+ ts_extrapolator_->Reset(clock_->CurrentTime());
+ codec_timer_ = std::make_unique<CodecTimer>();
+ render_delay_ = kDefaultRenderDelay;
+ min_playout_delay_ = TimeDelta::Zero();
+ jitter_delay_ = TimeDelta::Zero();
+ current_delay_ = TimeDelta::Zero();
+ prev_frame_timestamp_ = 0;
+}
+
+void VCMTiming::set_render_delay(TimeDelta render_delay) {
+ MutexLock lock(&mutex_);
+ render_delay_ = render_delay;
+}
+
+TimeDelta VCMTiming::min_playout_delay() const {
+ MutexLock lock(&mutex_);
+ return min_playout_delay_;
+}
+
+void VCMTiming::set_min_playout_delay(TimeDelta min_playout_delay) {
+ MutexLock lock(&mutex_);
+ if (min_playout_delay_ != min_playout_delay) {
+ CheckDelaysValid(min_playout_delay, max_playout_delay_);
+ min_playout_delay_ = min_playout_delay;
+ }
+}
+
+void VCMTiming::set_max_playout_delay(TimeDelta max_playout_delay) {
+ MutexLock lock(&mutex_);
+ if (max_playout_delay_ != max_playout_delay) {
+ CheckDelaysValid(min_playout_delay_, max_playout_delay);
+ max_playout_delay_ = max_playout_delay;
+ }
+}
+
+void VCMTiming::SetJitterDelay(TimeDelta jitter_delay) {
+ MutexLock lock(&mutex_);
+ if (jitter_delay != jitter_delay_) {
+ jitter_delay_ = jitter_delay;
+ // When in initial state, set current delay to minimum delay.
+ if (current_delay_.IsZero()) {
+ current_delay_ = jitter_delay_;
+ }
+ }
+}
+
+void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
+ MutexLock lock(&mutex_);
+ TimeDelta target_delay = TargetDelayInternal();
+
+ if (current_delay_.IsZero()) {
+ // Not initialized, set current delay to target.
+ current_delay_ = target_delay;
+ } else if (target_delay != current_delay_) {
+ TimeDelta delay_diff = target_delay - current_delay_;
+ // Never change the delay with more than 100 ms every second. If we're
+ // changing the delay in too large steps we will get noticeable freezes. By
+ // limiting the change we can increase the delay in smaller steps, which
+ // will be experienced as the video is played in slow motion. When lowering
+ // the delay the video will be played at a faster pace.
+ TimeDelta max_change = TimeDelta::Zero();
+ if (frame_timestamp < 0x0000ffff && prev_frame_timestamp_ > 0xffff0000) {
+ // The 32-bit RTP timestamp wrapped around.
+ max_change =
+ TimeDelta::Millis(kDelayMaxChangeMsPerS *
+ (frame_timestamp + (static_cast<int64_t>(1) << 32) -
+ prev_frame_timestamp_) /
+ 90000);
+ } else {
+ max_change =
+ TimeDelta::Millis(kDelayMaxChangeMsPerS *
+ (frame_timestamp - prev_frame_timestamp_) / 90000);
+ }
+
+ if (max_change <= TimeDelta::Zero()) {
+ // Any changes less than 1 ms are truncated and will be postponed.
+ // Negative change will be due to reordering and should be ignored.
+ return;
+ }
+ delay_diff = std::max(delay_diff, -max_change);
+ delay_diff = std::min(delay_diff, max_change);
+
+ current_delay_ = current_delay_ + delay_diff;
+ }
+ prev_frame_timestamp_ = frame_timestamp;
+}
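+
+// Example of the rate limiting above: at 90 kHz, two frames 3000 RTP ticks
+// apart were captured ~33 ms apart, so with kDelayMaxChangeMsPerS = 100 the
+// current delay may move by at most 100 * 3000 / 90000 = ~3 ms between them.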
+
+void VCMTiming::UpdateCurrentDelay(Timestamp render_time,
+ Timestamp actual_decode_time) {
+ MutexLock lock(&mutex_);
+ TimeDelta target_delay = TargetDelayInternal();
+ TimeDelta delayed =
+ (actual_decode_time - render_time) + RequiredDecodeTime() + render_delay_;
+
+ // `delayed.ms()` maps sub-millisecond negative values to zero, so only
+ // frames that are early by about a millisecond or more count as negative
+ // and are ignored here.
+ if (delayed.ms() < 0) {
+ return;
+ }
+ if (current_delay_ + delayed <= target_delay) {
+ current_delay_ += delayed;
+ } else {
+ current_delay_ = target_delay;
+ }
+}
+
+void VCMTiming::StopDecodeTimer(TimeDelta decode_time, Timestamp now) {
+ MutexLock lock(&mutex_);
+ codec_timer_->AddTiming(decode_time.ms(), now.ms());
+ RTC_DCHECK_GE(decode_time, TimeDelta::Zero());
+ ++num_decoded_frames_;
+}
+
+void VCMTiming::IncomingTimestamp(uint32_t rtp_timestamp, Timestamp now) {
+ MutexLock lock(&mutex_);
+ ts_extrapolator_->Update(now, rtp_timestamp);
+}
+
+Timestamp VCMTiming::RenderTime(uint32_t frame_timestamp, Timestamp now) const {
+ MutexLock lock(&mutex_);
+ return RenderTimeInternal(frame_timestamp, now);
+}
+
+void VCMTiming::SetLastDecodeScheduledTimestamp(
+ Timestamp last_decode_scheduled) {
+ MutexLock lock(&mutex_);
+ last_decode_scheduled_ = last_decode_scheduled;
+}
+
+Timestamp VCMTiming::RenderTimeInternal(uint32_t frame_timestamp,
+ Timestamp now) const {
+ if (UseLowLatencyRendering()) {
+ // Render as soon as possible or with low-latency renderer algorithm.
+ return Timestamp::Zero();
+ }
+ // TimestampExtrapolator::ExtrapolateLocalTime() is const and only peeks at
+ // the unwrap state, so calling it here does not mutate the extrapolator.
+ Timestamp estimated_complete_time =
+ ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp).value_or(now);
+
+ // Make sure the actual delay stays in the range of `min_playout_delay_`
+ // and `max_playout_delay_`.
+ TimeDelta actual_delay =
+ current_delay_.Clamped(min_playout_delay_, max_playout_delay_);
+ return estimated_complete_time + actual_delay;
+}
+
+TimeDelta VCMTiming::RequiredDecodeTime() const {
+ const int decode_time_ms = codec_timer_->RequiredDecodeTimeMs();
+ RTC_DCHECK_GE(decode_time_ms, 0);
+ return TimeDelta::Millis(decode_time_ms);
+}
+
+TimeDelta VCMTiming::MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const {
+ MutexLock lock(&mutex_);
+
+ if (render_time.IsZero() && zero_playout_delay_min_pacing_->us() > 0 &&
+ min_playout_delay_.IsZero() && max_playout_delay_ > TimeDelta::Zero()) {
+ // `render_time` == 0 indicates that the frame should be decoded and
+ // rendered as soon as possible. However, the decoder can be choked if too
+ // many frames are sent at once. Therefore, limit the interframe delay to
+ // `zero_playout_delay_min_pacing_` unless too many frames are queued, in
+ // which case the frames are sent to the decoder at once.
+ if (too_many_frames_queued) {
+ return TimeDelta::Zero();
+ }
+ Timestamp earliest_next_decode_start_time =
+ last_decode_scheduled_ + zero_playout_delay_min_pacing_;
+ TimeDelta max_wait_time = now >= earliest_next_decode_start_time
+ ? TimeDelta::Zero()
+ : earliest_next_decode_start_time - now;
+ return max_wait_time;
+ }
+ return render_time - now - RequiredDecodeTime() - render_delay_;
+}
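+
+// Example of the pacing branch above: with the default min_pacing of 8 ms, a
+// zero-playout-delay frame whose predecessor was scheduled for decoding 3 ms
+// earlier waits a further 5 ms, unless too many frames are queued, in which
+// case it is released immediately.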
+
+TimeDelta VCMTiming::TargetVideoDelay() const {
+ MutexLock lock(&mutex_);
+ return TargetDelayInternal();
+}
+
+TimeDelta VCMTiming::TargetDelayInternal() const {
+ return std::max(min_playout_delay_,
+ jitter_delay_ + RequiredDecodeTime() + render_delay_);
+}
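+
+// Example: with a jitter delay of 50 ms, a required decode time of 15 ms and
+// the default render delay of 10 ms, the target delay is
+// max(min_playout_delay_, 75 ms); a configured minimum playout delay only
+// takes effect once it exceeds the sum of the pipeline delays.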
+
+VideoFrame::RenderParameters VCMTiming::RenderParameters() const {
+ MutexLock lock(&mutex_);
+ return {.use_low_latency_rendering = UseLowLatencyRendering(),
+ .max_composition_delay_in_frames = max_composition_delay_in_frames_};
+}
+
+bool VCMTiming::UseLowLatencyRendering() const {
+ // A min playout delay of zero together with a max playout delay no larger
+ // than kLowLatencyStreamMaxPlayoutDelayThreshold indicates that the
+ // low-latency path should be used: frames are decoded and rendered as soon
+ // as possible.
+ return min_playout_delay_.IsZero() &&
+ max_playout_delay_ <= kLowLatencyStreamMaxPlayoutDelayThreshold;
+}
+
+VCMTiming::VideoDelayTimings VCMTiming::GetTimings() const {
+ MutexLock lock(&mutex_);
+ return VideoDelayTimings{.max_decode_duration = RequiredDecodeTime(),
+ .current_delay = current_delay_,
+ .target_delay = TargetDelayInternal(),
+ .jitter_buffer_delay = jitter_delay_,
+ .min_playout_delay = min_playout_delay_,
+ .max_playout_delay = max_playout_delay_,
+ .render_delay = render_delay_,
+ .num_decoded_frames = num_decoded_frames_};
+}
+
+void VCMTiming::SetTimingFrameInfo(const TimingFrameInfo& info) {
+ MutexLock lock(&mutex_);
+ timing_frame_info_.emplace(info);
+}
+
+absl::optional<TimingFrameInfo> VCMTiming::GetTimingFrameInfo() {
+ MutexLock lock(&mutex_);
+ return timing_frame_info_;
+}
+
+void VCMTiming::SetMaxCompositionDelayInFrames(
+ absl::optional<int> max_composition_delay_in_frames) {
+ MutexLock lock(&mutex_);
+ max_composition_delay_in_frames_ = max_composition_delay_in_frames;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing.h b/third_party/libwebrtc/modules/video_coding/timing/timing.h
new file mode 100644
index 0000000000..727527f009
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_TIMING_TIMING_H_
+#define MODULES_VIDEO_CODING_TIMING_TIMING_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/units/time_delta.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/timing/codec_timer.h"
+#include "modules/video_coding/timing/timestamp_extrapolator.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class VCMTiming {
+ public:
+ static constexpr auto kDefaultRenderDelay = TimeDelta::Millis(10);
+ static constexpr auto kDelayMaxChangeMsPerS = 100;
+
+ VCMTiming(Clock* clock, const FieldTrialsView& field_trials);
+ virtual ~VCMTiming() = default;
+
+ // Resets the timing to the initial state.
+ void Reset();
+
+ // Set the amount of time needed to render an image. Defaults to 10 ms.
+ void set_render_delay(TimeDelta render_delay);
+
+ // Set the minimum time the video must be delayed on the receiver to
+ // get the desired jitter buffer level.
+ void SetJitterDelay(TimeDelta required_delay);
+
+ // Set/get the minimum playout delay from capture to render.
+ TimeDelta min_playout_delay() const;
+ void set_min_playout_delay(TimeDelta min_playout_delay);
+
+ // Set the maximum playout delay from capture to render.
+ void set_max_playout_delay(TimeDelta max_playout_delay);
+
+ // Increases or decreases the current delay to get closer to the target delay.
+ // Calculates how long it has been since the previous call to this function,
+ // and increases/decreases the delay in proportion to the time difference.
+ void UpdateCurrentDelay(uint32_t frame_timestamp);
+
+ // Increases or decreases the current delay to get closer to the target delay.
+ // Given the render time and the actual decode time for a frame, this
+ // function calculates how late the frame is and increases the delay
+ // accordingly.
+ void UpdateCurrentDelay(Timestamp render_time, Timestamp actual_decode_time);
+
+ // Stops the decoder timer, should be called when the decoder returns a frame
+ // or when the decoded frame callback is called.
+ void StopDecodeTimer(TimeDelta decode_time, Timestamp now);
+
+ // Used to report that a frame is passed to decoding. Updates the timestamp
+ // filter which is used to map between timestamps and receiver system time.
+ virtual void IncomingTimestamp(uint32_t rtp_timestamp,
+ Timestamp last_packet_time);
+
+ // Returns the receiver system time when the frame with timestamp
+ // `frame_timestamp` should be rendered, assuming that the system time
+ // currently is `now`.
+ virtual Timestamp RenderTime(uint32_t frame_timestamp, Timestamp now) const;
+
+ // Returns the maximum time that we can wait for a frame to become
+ // complete before we must pass it to the decoder. render_time==0 indicates
+ // that the frames should be processed as quickly as possible, with possibly
+ // only a small delay added to make sure that the decoder is not overloaded.
+ // In this case, the parameter too_many_frames_queued is used to signal that
+ // the decode queue is full and that the frame should be decoded as soon as
+ // possible.
+ virtual TimeDelta MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const;
+
+ // Returns the current target delay which is required delay + decode time +
+ // render delay.
+ TimeDelta TargetVideoDelay() const;
+
+ // Returns the current timing information.
+ struct VideoDelayTimings {
+ TimeDelta max_decode_duration;
+ TimeDelta current_delay;
+ TimeDelta target_delay;
+ TimeDelta jitter_buffer_delay;
+ TimeDelta min_playout_delay;
+ TimeDelta max_playout_delay;
+ TimeDelta render_delay;
+ size_t num_decoded_frames;
+ };
+ VideoDelayTimings GetTimings() const;
+
+ void SetTimingFrameInfo(const TimingFrameInfo& info);
+ absl::optional<TimingFrameInfo> GetTimingFrameInfo();
+
+ void SetMaxCompositionDelayInFrames(
+ absl::optional<int> max_composition_delay_in_frames);
+
+ VideoFrame::RenderParameters RenderParameters() const;
+
+ // Updates the last time a frame was scheduled for decoding.
+ void SetLastDecodeScheduledTimestamp(Timestamp last_decode_scheduled);
+
+ protected:
+ TimeDelta RequiredDecodeTime() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ Timestamp RenderTimeInternal(uint32_t frame_timestamp, Timestamp now) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ TimeDelta TargetDelayInternal() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ bool UseLowLatencyRendering() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ private:
+ mutable Mutex mutex_;
+ Clock* const clock_;
+ const std::unique_ptr<TimestampExtrapolator> ts_extrapolator_
+ RTC_PT_GUARDED_BY(mutex_);
+ std::unique_ptr<CodecTimer> codec_timer_ RTC_GUARDED_BY(mutex_)
+ RTC_PT_GUARDED_BY(mutex_);
+ TimeDelta render_delay_ RTC_GUARDED_BY(mutex_);
+ // Best-effort playout delay range for frames from capture to render.
+ // The receiver tries to keep the delay between `min_playout_delay_` and
+ // `max_playout_delay_`, taking the network jitter into account. A special
+ // case is min_playout_delay_ = max_playout_delay_ = 0, in which case the
+ // receiver tries to play the frames as they arrive.
+ TimeDelta min_playout_delay_ RTC_GUARDED_BY(mutex_);
+ TimeDelta max_playout_delay_ RTC_GUARDED_BY(mutex_);
+ TimeDelta jitter_delay_ RTC_GUARDED_BY(mutex_);
+ TimeDelta current_delay_ RTC_GUARDED_BY(mutex_);
+ uint32_t prev_frame_timestamp_ RTC_GUARDED_BY(mutex_);
+ absl::optional<TimingFrameInfo> timing_frame_info_ RTC_GUARDED_BY(mutex_);
+ size_t num_decoded_frames_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int> max_composition_delay_in_frames_ RTC_GUARDED_BY(mutex_);
+ // Set by the field trial WebRTC-ZeroPlayoutDelay. The parameter min_pacing
+ // determines the minimum delay between frames scheduled for decoding that is
+ // used when min playout delay=0 and max playout delay>=0.
+ FieldTrialParameter<TimeDelta> zero_playout_delay_min_pacing_
+ RTC_GUARDED_BY(mutex_);
+ // Timestamp at which the last frame was scheduled to be sent to the decoder.
+ // Used only when the RTP header extension playout delay is set to min=0 ms
+ // which is indicated by a render time set to 0.
+ Timestamp last_decode_scheduled_ RTC_GUARDED_BY(mutex_);
+};
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_TIMING_TIMING_H_
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build b/third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build
new file mode 100644
index 0000000000..8ab43fb748
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing_module_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/timing/timing.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("timing_module_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc b/third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc
new file mode 100644
index 0000000000..8633c0de39
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/timing/timing_unittest.cc
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/timing/timing.h"
+
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+namespace {
+
+constexpr Frequency k25Fps = Frequency::Hertz(25);
+constexpr Frequency k90kHz = Frequency::KiloHertz(90);
+
+} // namespace
+
+TEST(ReceiverTimingTest, JitterDelay) {
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+
+ uint32_t timestamp = 0;
+ timing.UpdateCurrentDelay(timestamp);
+
+ timing.Reset();
+
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ TimeDelta jitter_delay = TimeDelta::Millis(20);
+ timing.SetJitterDelay(jitter_delay);
+ timing.UpdateCurrentDelay(timestamp);
+ timing.set_render_delay(TimeDelta::Zero());
+ auto wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ // The first update initializes the render time. Since there is no decode
+ // delay, wait_time = render_time - now - render_delay = jitter delay.
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ jitter_delay += TimeDelta::Millis(VCMTiming::kDelayMaxChangeMsPerS + 10);
+ timestamp += 90000;
+ clock.AdvanceTimeMilliseconds(1000);
+ timing.SetJitterDelay(jitter_delay);
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ // Since the delay is ramped by at most 100 ms per second
+ // (kDelayMaxChangeMsPerS), the current delay is still 10 ms short.
+ EXPECT_EQ(jitter_delay - TimeDelta::Millis(10), wait_time);
+
+ timestamp += 90000;
+ clock.AdvanceTimeMilliseconds(1000);
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ // Insert frames without jitter, verify that this gives the exact wait time.
+ const int kNumFrames = 300;
+ for (int i = 0; i < kNumFrames; i++) {
+ clock.AdvanceTime(1 / k25Fps);
+ timestamp += k90kHz / k25Fps;
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ }
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ // Add decode time estimates for 1 second.
+ const TimeDelta kDecodeTime = TimeDelta::Millis(10);
+ for (int i = 0; i < k25Fps.hertz(); i++) {
+ clock.AdvanceTime(kDecodeTime);
+ timing.StopDecodeTimer(kDecodeTime, clock.CurrentTime());
+ timestamp += k90kHz / k25Fps;
+ clock.AdvanceTime(1 / k25Fps - kDecodeTime);
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ }
+ timing.UpdateCurrentDelay(timestamp);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ EXPECT_EQ(jitter_delay, wait_time);
+
+ const TimeDelta kMinTotalDelay = TimeDelta::Millis(200);
+ timing.set_min_playout_delay(kMinTotalDelay);
+ clock.AdvanceTimeMilliseconds(5000);
+ timestamp += 5 * 90000;
+ timing.UpdateCurrentDelay(timestamp);
+ const TimeDelta kRenderDelay = TimeDelta::Millis(10);
+ timing.set_render_delay(kRenderDelay);
+ wait_time = timing.MaxWaitingTime(
+ timing.RenderTime(timestamp, clock.CurrentTime()), clock.CurrentTime(),
+ /*too_many_frames_queued=*/false);
+ // We should wait at least kMinTotalDelay - kDecodeTime (10 ms) -
+ // kRenderDelay (10 ms).
+ EXPECT_EQ(kMinTotalDelay - kDecodeTime - kRenderDelay, wait_time);
+ // The total video delay should be equal to the min total delay.
+ EXPECT_EQ(kMinTotalDelay, timing.TargetVideoDelay());
+
+ // Reset playout delay.
+ timing.set_min_playout_delay(TimeDelta::Zero());
+ clock.AdvanceTimeMilliseconds(5000);
+ timestamp += 5 * 90000;
+ timing.UpdateCurrentDelay(timestamp);
+}
+
+TEST(ReceiverTimingTest, TimestampWrapAround) {
+ constexpr auto kStartTime = Timestamp::Millis(1337);
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(kStartTime);
+ VCMTiming timing(&clock, field_trials);
+
+ // Provoke a wrap-around. The fifth frame will have wrapped at 25 fps.
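+ // 90000 Hz / 25 fps = 3600 RTP ticks per frame.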
+ constexpr uint32_t kRtpTicksPerFrame = k90kHz / k25Fps;
+ uint32_t timestamp = 0xFFFFFFFFu - 3 * kRtpTicksPerFrame;
+ for (int i = 0; i < 5; ++i) {
+ timing.IncomingTimestamp(timestamp, clock.CurrentTime());
+ clock.AdvanceTime(1 / k25Fps);
+ timestamp += kRtpTicksPerFrame;
+ EXPECT_EQ(kStartTime + 3 / k25Fps,
+ timing.RenderTime(0xFFFFFFFFu, clock.CurrentTime()));
+ // 89 is 0xFFFFFFFF + 90 (mod 2^32), i.e. 90 ticks or one ms later in
+ // 90 kHz.
+ EXPECT_EQ(kStartTime + 3 / k25Fps + TimeDelta::Millis(1),
+ timing.RenderTime(89u, clock.CurrentTime()));
+ }
+}
+
+TEST(ReceiverTimingTest, UseLowLatencyRenderer) {
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ // Default is false.
+ EXPECT_FALSE(timing.RenderParameters().use_low_latency_rendering);
+ // False if min playout delay > 0.
+ timing.set_min_playout_delay(TimeDelta::Millis(10));
+ timing.set_max_playout_delay(TimeDelta::Millis(20));
+ EXPECT_FALSE(timing.RenderParameters().use_low_latency_rendering);
+ // True if min==0, max > 0.
+ timing.set_min_playout_delay(TimeDelta::Zero());
+ EXPECT_TRUE(timing.RenderParameters().use_low_latency_rendering);
+ // True if min==max==0.
+ timing.set_max_playout_delay(TimeDelta::Zero());
+ EXPECT_TRUE(timing.RenderParameters().use_low_latency_rendering);
+ // True also for max playout delay==500 ms.
+ timing.set_max_playout_delay(TimeDelta::Millis(500));
+ EXPECT_TRUE(timing.RenderParameters().use_low_latency_rendering);
+ // False if max playout delay > 500 ms.
+ timing.set_max_playout_delay(TimeDelta::Millis(501));
+ EXPECT_FALSE(timing.RenderParameters().use_low_latency_rendering);
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeIsZeroForZeroRenderTime) {
+ // This is the default path when the RTP playout delay header extension is set
+ // to min==0 and max==0.
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ constexpr TimeDelta kTimeDelta = 1 / Frequency::Hertz(60);
+ constexpr Timestamp kZeroRenderTime = Timestamp::Zero();
+ SimulatedClock clock(kStartTimeUs);
+ test::ScopedKeyValueConfig field_trials;
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ timing.set_max_playout_delay(TimeDelta::Zero());
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTime(kTimeDelta);
+ Timestamp now = clock.CurrentTime();
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ }
+ // Another frame submitted at the same time also returns a negative max
+ // waiting time.
+ Timestamp now = clock.CurrentTime();
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ // MaxWaitingTime should be less than zero even if there's a burst of frames.
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeZeroDelayPacingExperiment) {
+ // The minimum pacing is enabled by a field trial and active if the RTP
+ // playout delay header extension is set to min==0.
+ constexpr TimeDelta kMinPacing = TimeDelta::Millis(3);
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ constexpr TimeDelta kTimeDelta = 1 / Frequency::Hertz(60);
+ constexpr auto kZeroRenderTime = Timestamp::Zero();
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ // MaxWaitingTime() returns zero for evenly spaced video frames.
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTime(kTimeDelta);
+ Timestamp now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ timing.SetLastDecodeScheduledTimestamp(now);
+ }
+ // Another frame submitted at the same time is paced according to the field
+ // trial setting.
+ auto now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ // If there's a burst of frames, the wait time is calculated based on next
+ // decode time.
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ // Allow a few ms to pass, this should be subtracted from the MaxWaitingTime.
+ constexpr TimeDelta kTwoMs = TimeDelta::Millis(2);
+ clock.AdvanceTime(kTwoMs);
+ now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing - kTwoMs);
+ // A frame is decoded at the current time, the wait time should be restored to
+ // pacing delay.
+ timing.SetLastDecodeScheduledTimestamp(now);
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+}
+
+TEST(ReceiverTimingTest, DefaultMaxWaitingTimeUnaffectedByPacingExperiment) {
+ // The minimum pacing is enabled by a field trial but should have no effect
+ // when the render time is greater than zero.
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ const TimeDelta kTimeDelta = TimeDelta::Millis(1000.0 / 60.0);
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ clock.AdvanceTime(kTimeDelta);
+ auto now = clock.CurrentTime();
+ Timestamp render_time = now + TimeDelta::Millis(30);
+ // Estimate the internal processing delay from the first frame.
+ TimeDelta estimated_processing_delay =
+ (render_time - now) -
+ timing.MaxWaitingTime(render_time, now,
+ /*too_many_frames_queued=*/false);
+ EXPECT_GT(estimated_processing_delay, TimeDelta::Zero());
+
+ // Any other frame submitted at the same time should be scheduled according to
+ // its render time.
+ for (int i = 0; i < 5; ++i) {
+ render_time += kTimeDelta;
+ EXPECT_EQ(timing.MaxWaitingTime(render_time, now,
+ /*too_many_frames_queued=*/false),
+ render_time - now - estimated_processing_delay);
+ }
+}
+
+TEST(ReceiverTimingTest, MaxWaitingTimeReturnsZeroIfTooManyFramesQueuedIsTrue) {
+ // The minimum pacing is enabled by a field trial and active if the RTP
+ // playout delay header extension is set to min==0.
+ constexpr TimeDelta kMinPacing = TimeDelta::Millis(3);
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/");
+ constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us.
+ const TimeDelta kTimeDelta = TimeDelta::Millis(1000.0 / 60.0);
+ constexpr auto kZeroRenderTime = Timestamp::Zero();
+ SimulatedClock clock(kStartTimeUs);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+ // MaxWaitingTime() returns zero for evenly spaced video frames.
+ for (int i = 0; i < 10; ++i) {
+ clock.AdvanceTime(kTimeDelta);
+ auto now = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now,
+ /*too_many_frames_queued=*/false),
+ TimeDelta::Zero());
+ timing.SetLastDecodeScheduledTimestamp(now);
+ }
+ // Another frame submitted at the same time is paced according to the field
+ // trial setting.
+ auto now_ms = clock.CurrentTime();
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now_ms,
+ /*too_many_frames_queued=*/false),
+ kMinPacing);
+ // MaxWaitingTime returns 0 even if there's a burst of frames if
+ // too_many_frames_queued is set to true.
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now_ms,
+ /*too_many_frames_queued=*/true),
+ TimeDelta::Zero());
+ EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTime, now_ms,
+ /*too_many_frames_queued=*/true),
+ TimeDelta::Zero());
+}
+
+TEST(ReceiverTimingTest, UpdateCurrentDelayCapsWhenOffByMicroseconds) {
+ test::ScopedKeyValueConfig field_trials;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock, field_trials);
+ timing.Reset();
+
+ // Set larger initial current delay.
+ timing.set_min_playout_delay(TimeDelta::Millis(200));
+ timing.UpdateCurrentDelay(Timestamp::Millis(900), Timestamp::Millis(1000));
+
+ // Add a few microseconds to ensure that the delta of the decode time is 0
+ // after rounding; the current delay should then reset to the target delay.
+ timing.set_min_playout_delay(TimeDelta::Millis(50));
+ Timestamp decode_time = Timestamp::Millis(1337);
+ Timestamp render_time =
+ decode_time + TimeDelta::Millis(10) + TimeDelta::Micros(37);
+ timing.UpdateCurrentDelay(render_time, decode_time);
+ EXPECT_EQ(timing.GetTimings().current_delay, timing.TargetVideoDelay());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc
new file mode 100644
index 0000000000..13502a142b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/bandwidth_quality_scaler.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video/video_adaptation_reason.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/bandwidth_quality_scaler_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/weak_ptr.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultMaxWindowSizeMs = 5000;
+constexpr float kHigherMaxBitrateTolerationFactor = 0.95;
+constexpr float kLowerMinBitrateTolerationFactor = 0.8;
+constexpr int kDefaultBitrateStateUpdateIntervalSeconds = 5;
+} // namespace
+
+BandwidthQualityScaler::BandwidthQualityScaler(
+ BandwidthQualityScalerUsageHandlerInterface* handler)
+ : kBitrateStateUpdateInterval(TimeDelta::Seconds(
+ BandwidthQualityScalerSettings::ParseFromFieldTrials()
+ .BitrateStateUpdateInterval()
+ .value_or(kDefaultBitrateStateUpdateIntervalSeconds))),
+ handler_(handler),
+ encoded_bitrate_(kDefaultMaxWindowSizeMs, RateStatistics::kBpsScale),
+ weak_ptr_factory_(this) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK(handler_ != nullptr);
+
+ StartCheckForBitrate();
+}
+
+BandwidthQualityScaler::~BandwidthQualityScaler() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+}
+
+void BandwidthQualityScaler::StartCheckForBitrate() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ TaskQueueBase::Current()->PostDelayedTask(
+ [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), this] {
+ if (!this_weak_ptr) {
+ // The BandwidthQualityScaler that posted this task has been deleted.
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ switch (CheckBitrate()) {
+ case BandwidthQualityScaler::CheckBitrateResult::kHighBitRate: {
+ handler_->OnReportUsageBandwidthHigh();
+ last_frame_size_pixels_.reset();
+ break;
+ }
+ case BandwidthQualityScaler::CheckBitrateResult::kLowBitRate: {
+ handler_->OnReportUsageBandwidthLow();
+ last_frame_size_pixels_.reset();
+ break;
+ }
+ case BandwidthQualityScaler::CheckBitrateResult::kNormalBitrate: {
+ break;
+ }
+ case BandwidthQualityScaler::CheckBitrateResult::
+ kInsufficientSamples: {
+ break;
+ }
+ }
+ StartCheckForBitrate();
+ },
+ kBitrateStateUpdateInterval);
+}
+
+void BandwidthQualityScaler::ReportEncodeInfo(int frame_size_bytes,
+ int64_t time_sent_in_ms,
+ uint32_t encoded_width,
+ uint32_t encoded_height) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ last_time_sent_in_ms_ = time_sent_in_ms;
+ last_frame_size_pixels_ = encoded_width * encoded_height;
+ encoded_bitrate_.Update(frame_size_bytes, time_sent_in_ms);
+}
+
+void BandwidthQualityScaler::SetResolutionBitrateLimits(
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits) {
+ if (resolution_bitrate_limits.empty()) {
+ resolution_bitrate_limits_ = EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted();
+ } else {
+ resolution_bitrate_limits_ = resolution_bitrate_limits;
+ }
+}
+
+BandwidthQualityScaler::CheckBitrateResult
+BandwidthQualityScaler::CheckBitrate() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ if (!last_frame_size_pixels_.has_value() ||
+ !last_time_sent_in_ms_.has_value()) {
+ return BandwidthQualityScaler::CheckBitrateResult::kInsufficientSamples;
+ }
+
+ absl::optional<int64_t> current_bitrate_bps =
+ encoded_bitrate_.Rate(last_time_sent_in_ms_.value());
+ if (!current_bitrate_bps.has_value()) {
+ // We can't get a valid bitrate due to not enough data points.
+ return BandwidthQualityScaler::CheckBitrateResult::kInsufficientSamples;
+ }
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> suitable_bitrate_limit =
+ EncoderInfoSettings::
+ GetSinglecastBitrateLimitForResolutionWhenQpIsUntrusted(
+ last_frame_size_pixels_, resolution_bitrate_limits_);
+
+ if (!suitable_bitrate_limit.has_value()) {
+ return BandwidthQualityScaler::CheckBitrateResult::kInsufficientSamples;
+ }
+
+ // Multiply by a toleration factor to avoid frequent adaptation when the
+ // measured bitrate hovers around a limit.
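+ // For example, with the 640x360 default limits of [500000, 800000] bps used
+ // in the unit tests, the thresholds become 800000 * 0.95 = 760000 bps and
+ // 500000 * 0.8 = 400000 bps.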
+ if (current_bitrate_bps > suitable_bitrate_limit->max_bitrate_bps *
+ kHigherMaxBitrateTolerationFactor) {
+ return BandwidthQualityScaler::CheckBitrateResult::kLowBitRate;
+ } else if (current_bitrate_bps <
+ suitable_bitrate_limit->min_start_bitrate_bps *
+ kLowerMinBitrateTolerationFactor) {
+ return BandwidthQualityScaler::CheckBitrateResult::kHighBitRate;
+ }
+ return BandwidthQualityScaler::CheckBitrateResult::kNormalBitrate;
+}
+
+BandwidthQualityScalerUsageHandlerInterface::
+ ~BandwidthQualityScalerUsageHandlerInterface() {}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h
new file mode 100644
index 0000000000..7cd1de0dd2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_BANDWIDTH_QUALITY_SCALER_H_
+#define MODULES_VIDEO_CODING_UTILITY_BANDWIDTH_QUALITY_SCALER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/weak_ptr.h"
+
+namespace webrtc {
+
+class BandwidthQualityScalerUsageHandlerInterface {
+ public:
+ virtual ~BandwidthQualityScalerUsageHandlerInterface();
+
+ virtual void OnReportUsageBandwidthHigh() = 0;
+ virtual void OnReportUsageBandwidthLow() = 0;
+};
+
+// BandwidthQualityScaler runs asynchronously and monitors bandwidth values of
+// encoded frames. It holds a reference to a
+// BandwidthQualityScalerUsageHandlerInterface implementation to signal an
+// overuse or underuse of bandwidth (which indicate a desire to scale the video
+// stream down or up).
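+//
+// Sketch of typical use (illustrative; assumes all calls run on the task
+// queue the scaler was constructed on, as in the unit tests):
+//   BandwidthQualityScaler scaler(&handler);
+//   scaler.SetResolutionBitrateLimits(limits);
+//   // For every encoded frame:
+//   scaler.ReportEncodeInfo(frame_size_bytes, rtc::TimeMillis(),
+//                           encoded_width, encoded_height);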
+class BandwidthQualityScaler {
+ public:
+ explicit BandwidthQualityScaler(
+ BandwidthQualityScalerUsageHandlerInterface* handler);
+ virtual ~BandwidthQualityScaler();
+
+ void ReportEncodeInfo(int frame_size_bytes,
+ int64_t time_sent_in_ms,
+ uint32_t encoded_width,
+ uint32_t encoded_height);
+
+ // We prefer the `resolution_bitrate_limits` provided by the current
+ // encoder. If none are provided, we fall back to the defaults from
+ // GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted().
+ void SetResolutionBitrateLimits(
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits);
+
+ const TimeDelta kBitrateStateUpdateInterval;
+
+ private:
+ enum class CheckBitrateResult {
+ kInsufficientSamples,
+ kNormalBitrate,
+ kHighBitRate,
+ kLowBitRate,
+ };
+
+ // Periodically checks the encoded bitrate; makes resolution up/down
+ // decisions and reports them to the handler.
+ void StartCheckForBitrate();
+ CheckBitrateResult CheckBitrate();
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_;
+ BandwidthQualityScalerUsageHandlerInterface* const handler_
+ RTC_GUARDED_BY(&task_checker_);
+
+ absl::optional<int64_t> last_time_sent_in_ms_ RTC_GUARDED_BY(&task_checker_);
+ RateStatistics encoded_bitrate_ RTC_GUARDED_BY(&task_checker_);
+ absl::optional<int> last_frame_size_pixels_ RTC_GUARDED_BY(&task_checker_);
+ rtc::WeakPtrFactory<BandwidthQualityScaler> weak_ptr_factory_;
+
+ std::vector<VideoEncoder::ResolutionBitrateLimits> resolution_bitrate_limits_;
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_UTILITY_BANDWIDTH_QUALITY_SCALER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc
new file mode 100644
index 0000000000..d28052e28d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler_unittest.cc
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/bandwidth_quality_scaler.h"
+
+#include <memory>
+#include <string>
+
+#include "api/units/time_delta.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/time_utils.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kFramerateFps = 30;
+constexpr TimeDelta kDefaultBitrateStateUpdateInterval = TimeDelta::Seconds(5);
+constexpr TimeDelta kDefaultEncodeTime = TimeDelta::Seconds(1) / kFramerateFps;
+
+} // namespace
+
+class FakeBandwidthQualityScalerHandler
+ : public BandwidthQualityScalerUsageHandlerInterface {
+ public:
+ ~FakeBandwidthQualityScalerHandler() override = default;
+ void OnReportUsageBandwidthHigh() override {
+ adapt_down_event_count_++;
+ event_.Set();
+ }
+
+ void OnReportUsageBandwidthLow() override {
+ adapt_up_event_count_++;
+ event_.Set();
+ }
+
+ rtc::Event event_;
+ int adapt_up_event_count_ = 0;
+ int adapt_down_event_count_ = 0;
+};
+
+class BandwidthQualityScalerUnderTest : public BandwidthQualityScaler {
+ public:
+ explicit BandwidthQualityScalerUnderTest(
+ BandwidthQualityScalerUsageHandlerInterface* handler)
+ : BandwidthQualityScaler(handler) {}
+
+ int GetBitrateStateUpdateIntervalMs() {
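+ // The extra 200 ms is an assumed margin for task-queue scheduling delays
+ // when the tests wait on handler events.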
+ return this->kBitrateStateUpdateInterval.ms() + 200;
+ }
+};
+
+class BandwidthQualityScalerTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<std::string> {
+ protected:
+ enum ScaleDirection {
+ kKeepScaleNormalBandwidth,
+ kKeepScaleAboveMaxBandwidth,
+ kKeepScaleUnderMinBandwidth,
+ };
+
+ enum FrameType {
+ kKeyFrame,
+ kNormalFrame,
+ kNormalFrame_Overuse,
+ kNormalFrame_Underuse,
+ };
+ struct FrameConfig {
+ FrameConfig(int frame_num,
+ FrameType frame_type,
+ int actual_width,
+ int actual_height)
+ : frame_num(frame_num),
+ frame_type(frame_type),
+ actual_width(actual_width),
+ actual_height(actual_height) {}
+
+ int frame_num;
+ FrameType frame_type;
+ int actual_width;
+ int actual_height;
+ };
+
+ BandwidthQualityScalerTest()
+ : scoped_field_trial_(GetParam()),
+ task_queue_("BandwidthQualityScalerTestQueue"),
+ handler_(std::make_unique<FakeBandwidthQualityScalerHandler>()) {
+ task_queue_.SendTask(
+ [this] {
+ bandwidth_quality_scaler_ =
+ std::unique_ptr<BandwidthQualityScalerUnderTest>(
+ new BandwidthQualityScalerUnderTest(handler_.get()));
+ bandwidth_quality_scaler_->SetResolutionBitrateLimits(
+ EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted());
+ // Only for testing. Set first_timestamp_ in RateStatistics to 0.
+ bandwidth_quality_scaler_->ReportEncodeInfo(0, 0, 0, 0);
+ });
+ }
+
+ ~BandwidthQualityScalerTest() {
+ task_queue_.SendTask([this] { bandwidth_quality_scaler_ = nullptr; });
+ }
+
+ int GetFrameSizeBytes(
+ const FrameConfig& config,
+ const VideoEncoder::ResolutionBitrateLimits& bitrate_limits) {
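+ // Convert bits per second to bytes per frame: divide by 8 bits per byte
+ // and by the frame rate.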
+ int scale = 8 * kFramerateFps;
+ switch (config.frame_type) {
+ case FrameType::kKeyFrame: {
+ // 4 is an experimental value: based on testing, a key frame is roughly
+ // four times the size of a normal frame.
+ return bitrate_limits.max_bitrate_bps * 4 / scale;
+ }
+ case FrameType::kNormalFrame_Overuse: {
+ return bitrate_limits.max_bitrate_bps * 3 / 2 / scale;
+ }
+ case FrameType::kNormalFrame_Underuse: {
+ return bitrate_limits.min_start_bitrate_bps * 3 / 4 / scale;
+ }
+ case FrameType::kNormalFrame: {
+ return (bitrate_limits.max_bitrate_bps +
+ bitrate_limits.min_start_bitrate_bps) /
+ 2 / scale;
+ }
+ }
+ return -1;
+ }
+
+ absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ GetDefaultSuitableBitrateLimit(int frame_size_pixels) {
+ return EncoderInfoSettings::
+ GetSinglecastBitrateLimitForResolutionWhenQpIsUntrusted(
+ frame_size_pixels,
+ EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted());
+ }
+
+ void TriggerBandwidthQualityScalerTest(
+ const std::vector<FrameConfig>& frame_configs) {
+ task_queue_.SendTask(
+ [frame_configs, this] {
+ RTC_CHECK(!frame_configs.empty());
+
+ int total_frame_nums = 0;
+ for (const FrameConfig& frame_config : frame_configs) {
+ total_frame_nums += frame_config.frame_num;
+ }
+
+ EXPECT_EQ(
+ kFramerateFps * kDefaultBitrateStateUpdateInterval.seconds(),
+ total_frame_nums);
+
+ uint32_t time_send_to_scaler_ms_ = rtc::TimeMillis();
+ for (size_t i = 0; i < frame_configs.size(); ++i) {
+ const FrameConfig& config = frame_configs[i];
+ absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ suitable_bitrate = GetDefaultSuitableBitrateLimit(
+ config.actual_width * config.actual_height);
+ EXPECT_TRUE(suitable_bitrate);
+ for (int j = 0; j <= config.frame_num; ++j) {
+ time_send_to_scaler_ms_ += kDefaultEncodeTime.ms();
+ int frame_size_bytes =
+ GetFrameSizeBytes(config, suitable_bitrate.value());
+ RTC_CHECK(frame_size_bytes > 0);
+ bandwidth_quality_scaler_->ReportEncodeInfo(
+ frame_size_bytes, time_send_to_scaler_ms_,
+ config.actual_width, config.actual_height);
+ }
+ }
+ });
+ }
+
+ test::ScopedFieldTrials scoped_field_trial_;
+ TaskQueueForTest task_queue_;
+ std::unique_ptr<BandwidthQualityScalerUnderTest> bandwidth_quality_scaler_;
+ std::unique_ptr<FakeBandwidthQualityScalerHandler> handler_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ FieldTrials,
+ BandwidthQualityScalerTest,
+ ::testing::Values("WebRTC-Video-BandwidthQualityScalerSettings/"
+ "bitrate_state_update_interval_s_:1/",
+ "WebRTC-Video-BandwidthQualityScalerSettings/"
+ "bitrate_state_update_interval_s_:2/"));
+
+TEST_P(BandwidthQualityScalerTest, AllNormalFrame_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(150, FrameType::kNormalFrame, 640, 360)};
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When resolution is 640*360, experimental working bitrate range is
+ // [500000,800000] bps. Encoded bitrate is 654253, so it falls in the range
+ // without any operation(up/down).
+ EXPECT_FALSE(handler_->event_.Wait(TimeDelta::Millis(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs())));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(0, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, AllNormalFrame_AboveMaxBandwidth_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(150, FrameType::kNormalFrame_Overuse, 640, 360)};
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When resolution is 640*360, experimental working bitrate range is
+ // [500000,800000] bps. Encoded bitrate is 1208000 > 800000 * 0.95, so it
+ // triggers adapt_up_event_count_.
+ EXPECT_TRUE(handler_->event_.Wait(TimeDelta::Millis(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs())));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(1, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, AllNormalFrame_Underuse_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(150, FrameType::kNormalFrame_Underuse, 640, 360)};
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When resolution is 640*360, experimental working bitrate range is
+ // [500000,800000] bps. Encoded bitrate is 377379 < 500000 * 0.8, so it
+ // triggers adapt_down_event_count_.
+ EXPECT_TRUE(handler_->event_.Wait(TimeDelta::Millis(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs())));
+ EXPECT_EQ(1, handler_->adapt_down_event_count_);
+ EXPECT_EQ(0, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, FixedFrameTypeTest1_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(5, FrameType::kNormalFrame_Underuse, 640, 360),
+ FrameConfig(110, FrameType::kNormalFrame, 640, 360),
+ FrameConfig(20, FrameType::kNormalFrame_Overuse, 640, 360),
+ FrameConfig(15, FrameType::kKeyFrame, 640, 360),
+ };
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When resolution is 640*360, experimental working bitrate range is
+ // [500000,800000] bps. Encoded bitrate is 1059462 > 800000 * 0.95, so it
+ // triggers adapt_up_event_count_.
+ EXPECT_TRUE(handler_->event_.Wait(TimeDelta::Millis(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs())));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(1, handler_->adapt_up_event_count_);
+}
+
+TEST_P(BandwidthQualityScalerTest, FixedFrameTypeTest2_640x360) {
+ const std::vector<FrameConfig> frame_configs{
+ FrameConfig(10, FrameType::kNormalFrame_Underuse, 640, 360),
+ FrameConfig(50, FrameType::kNormalFrame, 640, 360),
+ FrameConfig(5, FrameType::kKeyFrame, 640, 360),
+ FrameConfig(85, FrameType::kNormalFrame_Overuse, 640, 360),
+ };
+ TriggerBandwidthQualityScalerTest(frame_configs);
+
+ // When resolution is 640*360, experimental working bitrate range is
+ // [500000,800000] bps. Encoded bitrate is 1059462 > 800000 * 0.95, so it
+ // triggers adapt_up_event_count_.
+ EXPECT_TRUE(handler_->event_.Wait(TimeDelta::Millis(
+ bandwidth_quality_scaler_->GetBitrateStateUpdateIntervalMs())));
+ EXPECT_EQ(0, handler_->adapt_down_event_count_);
+ EXPECT_EQ(1, handler_->adapt_up_event_count_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc
new file mode 100644
index 0000000000..1138aa8448
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/decoded_frames_history.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace video_coding {
+
+DecodedFramesHistory::DecodedFramesHistory(size_t window_size)
+ : buffer_(window_size) {}
+
+DecodedFramesHistory::~DecodedFramesHistory() = default;
+
+void DecodedFramesHistory::InsertDecoded(int64_t frame_id, uint32_t timestamp) {
+ last_decoded_frame_ = frame_id;
+ last_decoded_frame_timestamp_ = timestamp;
+ int new_index = FrameIdToIndex(frame_id);
+
+ RTC_DCHECK(last_frame_id_ < frame_id);
+
+ // Clears expired values from the cyclic buffer_.
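+ // E.g. with an 8-entry window, jumping from frame id 10 (index 2) to
+ // frame id 13 (index 5) clears the skipped indices 3 and 4.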
+ if (last_frame_id_) {
+ int64_t id_jump = frame_id - *last_frame_id_;
+ int last_index = FrameIdToIndex(*last_frame_id_);
+
+ if (id_jump >= static_cast<int64_t>(buffer_.size())) {
+ std::fill(buffer_.begin(), buffer_.end(), false);
+ } else if (new_index > last_index) {
+ std::fill(buffer_.begin() + last_index + 1, buffer_.begin() + new_index,
+ false);
+ } else {
+ std::fill(buffer_.begin() + last_index + 1, buffer_.end(), false);
+ std::fill(buffer_.begin(), buffer_.begin() + new_index, false);
+ }
+ }
+
+ buffer_[new_index] = true;
+ last_frame_id_ = frame_id;
+}
+
+bool DecodedFramesHistory::WasDecoded(int64_t frame_id) const {
+ if (!last_frame_id_)
+ return false;
+
+ // A reference to a frame id outside the stored window can happen; assume
+ // such frames were not decoded.
+ if (frame_id <= *last_frame_id_ - static_cast<int64_t>(buffer_.size())) {
+ RTC_LOG(LS_WARNING) << "Referencing a frame out of the window. "
+ "Assuming it was undecoded to avoid artifacts.";
+ return false;
+ }
+
+ if (frame_id > last_frame_id_)
+ return false;
+
+ return buffer_[FrameIdToIndex(frame_id)];
+}
+
+void DecodedFramesHistory::Clear() {
+ last_decoded_frame_timestamp_.reset();
+ last_decoded_frame_.reset();
+ std::fill(buffer_.begin(), buffer_.end(), false);
+ last_frame_id_.reset();
+}
+
+absl::optional<int64_t> DecodedFramesHistory::GetLastDecodedFrameId() const {
+ return last_decoded_frame_;
+}
+
+absl::optional<uint32_t> DecodedFramesHistory::GetLastDecodedFrameTimestamp()
+ const {
+ return last_decoded_frame_timestamp_;
+}
+
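+// Maps a (possibly negative) frame id onto [0, buffer_.size()) using a
+// Euclidean modulo; e.g. with a window of 8192, frame id -1 maps to 8191.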
+int DecodedFramesHistory::FrameIdToIndex(int64_t frame_id) const {
+ int m = frame_id % buffer_.size();
+ return m >= 0 ? m : m + buffer_.size();
+}
+
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h
new file mode 100644
index 0000000000..9b8bf65821
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_DECODED_FRAMES_HISTORY_H_
+#define MODULES_VIDEO_CODING_UTILITY_DECODED_FRAMES_HISTORY_H_
+
+#include <stdint.h>
+
+#include <bitset>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_frame.h"
+
+namespace webrtc {
+namespace video_coding {
+
+class DecodedFramesHistory {
+ public:
+ // window_size - how many frames back into the past are remembered.
+ explicit DecodedFramesHistory(size_t window_size);
+ ~DecodedFramesHistory();
+ // Called for each decoded frame. Assumes frame ids are strictly increasing.
+ void InsertDecoded(int64_t frame_id, uint32_t timestamp);
+ // Queries whether the frame with `frame_id` was inserted before. The id
+ // should be at most window_size-1 smaller than the last inserted frame id.
+ bool WasDecoded(int64_t frame_id) const;
+
+ void Clear();
+
+ absl::optional<int64_t> GetLastDecodedFrameId() const;
+ absl::optional<uint32_t> GetLastDecodedFrameTimestamp() const;
+
+ private:
+ int FrameIdToIndex(int64_t frame_id) const;
+
+ std::vector<bool> buffer_;
+ absl::optional<int64_t> last_frame_id_;
+ absl::optional<int64_t> last_decoded_frame_;
+ absl::optional<uint32_t> last_decoded_frame_timestamp_;
+};
+
+} // namespace video_coding
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_UTILITY_DECODED_FRAMES_HISTORY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc
new file mode 100644
index 0000000000..ac09a42053
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history_unittest.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/decoded_frames_history.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace video_coding {
+namespace {
+
+constexpr int kHistorySize = 1 << 13;
+
+TEST(DecodedFramesHistory, RequestOnEmptyHistory) {
+ DecodedFramesHistory history(kHistorySize);
+ EXPECT_EQ(history.WasDecoded(1234), false);
+}
+
+TEST(DecodedFramesHistory, FindsLastDecodedFrame) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ EXPECT_EQ(history.WasDecoded(1234), true);
+}
+
+TEST(DecodedFramesHistory, FindsPreviousFrame) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1235, 0);
+ EXPECT_EQ(history.WasDecoded(1234), true);
+}
+
+TEST(DecodedFramesHistory, ReportsMissingFrame) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1236, 0);
+ EXPECT_EQ(history.WasDecoded(1235), false);
+}
+
+TEST(DecodedFramesHistory, ClearsHistory) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.Clear();
+ EXPECT_EQ(history.WasDecoded(1234), false);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt);
+}
+
+TEST(DecodedFramesHistory, HandlesBigJumpInPictureId) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1235, 0);
+ history.InsertDecoded(1236, 0);
+ history.InsertDecoded(1236 + kHistorySize / 2, 0);
+ EXPECT_EQ(history.WasDecoded(1234), true);
+ EXPECT_EQ(history.WasDecoded(1237), false);
+}
+
+TEST(DecodedFramesHistory, ForgetsTooOldHistory) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(1234, 0);
+ history.InsertDecoded(1235, 0);
+ history.InsertDecoded(1236, 0);
+ history.InsertDecoded(1236 + kHistorySize * 2, 0);
+ EXPECT_EQ(history.WasDecoded(1234), false);
+ EXPECT_EQ(history.WasDecoded(1237), false);
+}
+
+TEST(DecodedFramesHistory, ReturnsLastDecodedFrameId) {
+ DecodedFramesHistory history(kHistorySize);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt);
+ history.InsertDecoded(1234, 0);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), 1234);
+ history.InsertDecoded(1235, 0);
+ EXPECT_EQ(history.GetLastDecodedFrameId(), 1235);
+}
+
+TEST(DecodedFramesHistory, ReturnsLastDecodedFrameTimestamp) {
+ DecodedFramesHistory history(kHistorySize);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt);
+ history.InsertDecoded(1234, 12345);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12345u);
+ history.InsertDecoded(1235, 12366);
+ EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12366u);
+}
+
+TEST(DecodedFramesHistory, NegativePictureIds) {
+ DecodedFramesHistory history(kHistorySize);
+ history.InsertDecoded(-1234, 12345);
+ history.InsertDecoded(-1233, 12366);
+ EXPECT_EQ(*history.GetLastDecodedFrameId(), -1233);
+
+ history.InsertDecoded(-1, 12377);
+ history.InsertDecoded(0, 12388);
+ EXPECT_EQ(*history.GetLastDecodedFrameId(), 0);
+
+ history.InsertDecoded(1, 12399);
+ EXPECT_EQ(*history.GetLastDecodedFrameId(), 1);
+
+ EXPECT_EQ(history.WasDecoded(-1234), true);
+ EXPECT_EQ(history.WasDecoded(-1), true);
+ EXPECT_EQ(history.WasDecoded(0), true);
+ EXPECT_EQ(history.WasDecoded(1), true);
+}
+
+} // namespace
+} // namespace video_coding
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc
new file mode 100644
index 0000000000..8ea8a8e268
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/frame_dropper.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+namespace {
+
+const float kDefaultFrameSizeAlpha = 0.9f;
+const float kDefaultKeyFrameRatioAlpha = 0.99f;
+// 1 key frame every 10th second in 30 fps.
+const float kDefaultKeyFrameRatioValue = 1 / 300.0f;
+
+const float kDefaultDropRatioAlpha = 0.9f;
+const float kDefaultDropRatioValue = 0.96f;
+// Maximum duration over which frames are continuously dropped.
+const float kDefaultMaxDropDurationSecs = 4.0f;
+
+// Default target bitrate.
+// TODO(isheriff): Should this be higher to avoid dropping too many packets when
+// the bandwidth is unknown at the start?
+const float kDefaultTargetBitrateKbps = 300.0f;
+const float kDefaultIncomingFrameRate = 30;
+const float kLeakyBucketSizeSeconds = 0.5f;
+
+// A delta frame that is bigger than `kLargeDeltaFactor` times the average
+// delta frame is a large frame that is spread out for accumulation.
+const int kLargeDeltaFactor = 3;
+
+// Cap on the frame size accumulator to prevent excessive drops.
+const float kAccumulatorCapBufferSizeSecs = 3.0f;
+} // namespace
+
+FrameDropper::FrameDropper()
+ : key_frame_ratio_(kDefaultKeyFrameRatioAlpha),
+ delta_frame_size_avg_kbits_(kDefaultFrameSizeAlpha),
+ drop_ratio_(kDefaultDropRatioAlpha, kDefaultDropRatioValue),
+ enabled_(true),
+ max_drop_duration_secs_(kDefaultMaxDropDurationSecs) {
+ Reset();
+}
+
+FrameDropper::~FrameDropper() = default;
+
+void FrameDropper::Reset() {
+ key_frame_ratio_.Reset(kDefaultKeyFrameRatioAlpha);
+ key_frame_ratio_.Apply(1.0f, kDefaultKeyFrameRatioValue);
+ delta_frame_size_avg_kbits_.Reset(kDefaultFrameSizeAlpha);
+
+ accumulator_ = 0.0f;
+ accumulator_max_ = kDefaultTargetBitrateKbps / 2;
+ target_bitrate_ = kDefaultTargetBitrateKbps;
+ incoming_frame_rate_ = kDefaultIncomingFrameRate;
+
+ large_frame_accumulation_count_ = 0;
+ large_frame_accumulation_chunk_size_ = 0;
+ large_frame_accumulation_spread_ = 0.5 * kDefaultIncomingFrameRate;
+
+ drop_next_ = false;
+ drop_ratio_.Reset(0.9f);
+ drop_ratio_.Apply(0.0f, 0.0f);
+ drop_count_ = 0;
+ was_below_max_ = true;
+}
+
+void FrameDropper::Enable(bool enable) {
+ enabled_ = enable;
+}
+
+void FrameDropper::Fill(size_t framesize_bytes, bool delta_frame) {
+ if (!enabled_) {
+ return;
+ }
+ float framesize_kbits = 8.0f * static_cast<float>(framesize_bytes) / 1000.0f;
+ if (!delta_frame) {
+ key_frame_ratio_.Apply(1.0, 1.0);
+ // Do not spread if we are already doing it (or we risk dropping bits that
+ // need accumulation). Given we compute the key frame ratio and spread
+ // based on that, this should not normally happen.
+ if (large_frame_accumulation_count_ == 0) {
+ if (key_frame_ratio_.filtered() > 1e-5 &&
+ 1 / key_frame_ratio_.filtered() < large_frame_accumulation_spread_) {
+ large_frame_accumulation_count_ =
+ static_cast<int32_t>(1 / key_frame_ratio_.filtered() + 0.5);
+ } else {
+ large_frame_accumulation_count_ =
+ static_cast<int32_t>(large_frame_accumulation_spread_ + 0.5);
+ }
+ large_frame_accumulation_chunk_size_ =
+ framesize_kbits / large_frame_accumulation_count_;
+ framesize_kbits = 0;
+ }
+ } else {
+ // Identify if it is an unusually large delta frame and spread accumulation
+ // if that is the case.
+ if (delta_frame_size_avg_kbits_.filtered() != -1 &&
+ (framesize_kbits >
+ kLargeDeltaFactor * delta_frame_size_avg_kbits_.filtered()) &&
+ large_frame_accumulation_count_ == 0) {
+ large_frame_accumulation_count_ =
+ static_cast<int32_t>(large_frame_accumulation_spread_ + 0.5);
+ large_frame_accumulation_chunk_size_ =
+ framesize_kbits / large_frame_accumulation_count_;
+ framesize_kbits = 0;
+ } else {
+ delta_frame_size_avg_kbits_.Apply(1, framesize_kbits);
+ }
+ key_frame_ratio_.Apply(1.0, 0.0);
+ }
+ // Change the level of the accumulator (bucket).
+ accumulator_ += framesize_kbits;
+ CapAccumulator();
+}
+
+void FrameDropper::Leak(uint32_t input_framerate) {
+ if (!enabled_) {
+ return;
+ }
+ if (input_framerate < 1) {
+ return;
+ }
+ if (target_bitrate_ < 0.0f) {
+ return;
+ }
+ // Add lower bound for large frame accumulation spread.
+ large_frame_accumulation_spread_ = std::max(0.5 * input_framerate, 5.0);
+ // Expected bits per frame based on current input frame rate.
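+ // (E.g. a 300 kbps target at 30 fps leaks 10 kbit per frame; the
+ // accumulator is tracked in kbits.)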
+ float expected_bits_per_frame = target_bitrate_ / input_framerate;
+ if (large_frame_accumulation_count_ > 0) {
+ expected_bits_per_frame -= large_frame_accumulation_chunk_size_;
+ --large_frame_accumulation_count_;
+ }
+ accumulator_ -= expected_bits_per_frame;
+ if (accumulator_ < 0.0f) {
+ accumulator_ = 0.0f;
+ }
+ UpdateRatio();
+}
+
+void FrameDropper::UpdateRatio() {
+ if (accumulator_ > 1.3f * accumulator_max_) {
+ // Too far above accumulator max, react faster.
+ drop_ratio_.UpdateBase(0.8f);
+ } else {
+ // Go back to normal reaction.
+ drop_ratio_.UpdateBase(0.9f);
+ }
+ if (accumulator_ > accumulator_max_) {
+ // We are above accumulator max, and should ideally drop a frame. Increase
+ // the drop_ratio_ and drop the frame later.
+ if (was_below_max_) {
+ drop_next_ = true;
+ }
+ drop_ratio_.Apply(1.0f, 1.0f);
+ drop_ratio_.UpdateBase(0.9f);
+ } else {
+ drop_ratio_.Apply(1.0f, 0.0f);
+ }
+ was_below_max_ = accumulator_ < accumulator_max_;
+}
+
+// This function signals when to drop frames to the caller. It makes use of the
+// drop_ratio_ to smooth out the drops over time.
+bool FrameDropper::DropFrame() {
+ if (!enabled_) {
+ return false;
+ }
+ if (drop_next_) {
+ drop_next_ = false;
+ drop_count_ = 0;
+ }
+
+ if (drop_ratio_.filtered() >= 0.5f) { // Drops per keep
+ // Limit is the number of frames we should drop between each kept frame
+ // to keep our drop ratio. limit is positive in this case.
+ float denom = 1.0f - drop_ratio_.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
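+ // E.g. a filtered drop ratio of 0.6 gives limit = round(1 / 0.4 - 1) = 2,
+ // i.e. two frames dropped for every frame kept.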
+ // Put a bound on the max amount of dropped frames between each kept
+ // frame, in terms of frame rate and window size (secs).
+ int max_limit =
+ static_cast<int>(incoming_frame_rate_ * max_drop_duration_secs_);
+ if (limit > max_limit) {
+ limit = max_limit;
+ }
+ if (drop_count_ < 0) {
+ // Reset the drop_count_ since it was negative and should be positive.
+ drop_count_ = -drop_count_;
+ }
+ if (drop_count_ < limit) {
+ // As long we are below the limit we should drop frames.
+ drop_count_++;
+ return true;
+ } else {
+ // Only when we reset drop_count_ a frame should be kept.
+ drop_count_ = 0;
+ return false;
+ }
+ } else if (drop_ratio_.filtered() > 0.0f &&
+ drop_ratio_.filtered() < 0.5f) { // Keeps per drop
+ // Limit is the number of frames we should keep between each drop
+ // in order to keep the drop ratio. limit is negative in this case,
+ // and the drop_count_ is also negative.
+ float denom = drop_ratio_.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ if (drop_count_ > 0) {
+ // Reset the drop_count_ since we have a positive
+ // drop_count_, and it should be negative.
+ drop_count_ = -drop_count_;
+ }
+ if (drop_count_ > limit) {
+ if (drop_count_ == 0) {
+ // Drop frames when we reset drop_count_.
+ drop_count_--;
+ return true;
+ } else {
+ // Keep frames as long as we haven't reached limit.
+ drop_count_--;
+ return false;
+ }
+ } else {
+ drop_count_ = 0;
+ return false;
+ }
+ }
+ drop_count_ = 0;
+ return false;
+}
+
+void FrameDropper::SetRates(float bitrate, float incoming_frame_rate) {
+ // Bit rate of -1 means infinite bandwidth.
+ accumulator_max_ = bitrate * kLeakyBucketSizeSeconds;
+ if (target_bitrate_ > 0.0f && bitrate < target_bitrate_ &&
+ accumulator_ > accumulator_max_) {
+ // Rescale the accumulator level if the accumulator max decreases
+ accumulator_ = bitrate / target_bitrate_ * accumulator_;
+ }
+ target_bitrate_ = bitrate;
+ CapAccumulator();
+ incoming_frame_rate_ = incoming_frame_rate;
+}
+
+// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
+// This is a temporary fix for screencasting where very large frames from
+// encoder will cause very slow response (too many frame drops).
+// TODO(isheriff): Remove this now that large delta frames are also spread out ?
+void FrameDropper::CapAccumulator() {
+ float max_accumulator = target_bitrate_ * kAccumulatorCapBufferSizeSecs;
+ if (accumulator_ > max_accumulator) {
+ accumulator_ = max_accumulator;
+ }
+}
+} // namespace webrtc
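
For intuition, DropFrame() above turns the filtered drop ratio into a run length: at a ratio p >= 0.5 it drops about 1/(1-p) - 1 frames per kept frame, and below 0.5 it keeps about 1/p - 1 frames per drop (stored as a negative limit). A minimal standalone sketch of that arithmetic, not part of the patch:

#include <cstdio>

// Mirrors the limit arithmetic in FrameDropper::DropFrame().
int DropLimit(float p) {
  if (p >= 0.5f) {
    return static_cast<int>(1.0f / (1.0f - p) - 1.0f + 0.5f);
  }
  return -static_cast<int>(1.0f / p - 1.0f + 0.5f);
}

int main() {
  std::printf("%d\n", DropLimit(0.6f));   // 2: drop two frames per kept frame.
  std::printf("%d\n", DropLimit(0.25f));  // -3: keep three frames per drop.
}
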
diff --git a/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h
new file mode 100644
index 0000000000..b45b7fe27f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+#define MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "rtc_base/numerics/exp_filter.h"
+
+namespace webrtc {
+
+// The Frame Dropper implements a variant of the leaky bucket algorithm
+// for keeping track of when to drop frames to avoid bitrate overuse
+// when the encoder can't keep to its target bitrate.
+class FrameDropper {
+ public:
+ FrameDropper();
+ ~FrameDropper();
+
+ // Resets the FrameDropper to its initial state.
+ void Reset();
+
+ void Enable(bool enable);
+
+  // Determines whether it's time to drop a frame in order to reach a given
+  // frame rate. Must be called for every frame.
+ //
+ // Return value : True if we should drop the current frame.
+ bool DropFrame();
+
+ // Updates the FrameDropper with the size of the latest encoded frame.
+ // The FrameDropper calculates a new drop ratio (can be seen as the
+ // probability to drop a frame) and updates its internal statistics.
+ //
+ // Input:
+ // - framesize_bytes : The size of the latest frame returned
+ // from the encoder.
+ // - delta_frame : True if the encoder returned a delta frame.
+ void Fill(size_t framesize_bytes, bool delta_frame);
+
+ void Leak(uint32_t input_framerate);
+
+ // Sets the target bit rate and the frame rate produced by the camera.
+ //
+ // Input:
+ // - bitrate : The target bit rate.
+ void SetRates(float bitrate, float incoming_frame_rate);
+
+ private:
+ void UpdateRatio();
+ void CapAccumulator();
+
+ rtc::ExpFilter key_frame_ratio_;
+ rtc::ExpFilter delta_frame_size_avg_kbits_;
+
+  // Key frames and large delta frames are not accumulated in the bucket
+  // right away, since they could immediately overflow it and cause large
+  // drops on the following frames, which may be much smaller. Instead these
+  // large frames are accumulated over several frames as the bucket leaks.
+
+ // `large_frame_accumulation_spread_` represents the number of frames over
+ // which a large frame is accumulated.
+ float large_frame_accumulation_spread_;
+ // `large_frame_accumulation_count_` represents the number of frames left
+ // to finish accumulating a large frame.
+ int large_frame_accumulation_count_;
+ // `large_frame_accumulation_chunk_size_` represents the size of a single
+ // chunk for large frame accumulation.
+ float large_frame_accumulation_chunk_size_;
+
+ float accumulator_;
+ float accumulator_max_;
+ float target_bitrate_;
+ bool drop_next_;
+ rtc::ExpFilter drop_ratio_;
+ int drop_count_;
+ float incoming_frame_rate_;
+ bool was_below_max_;
+ bool enabled_;
+ const float max_drop_duration_secs_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
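
A minimal usage sketch of the interface above, following the per-frame order used in the unit tests below (Fill, then Leak, then DropFrame); rates and sizes are illustrative:

webrtc::FrameDropper dropper;
dropper.SetRates(/*bitrate=*/300.0f, /*incoming_frame_rate=*/30.0f);
for (int i = 0; i < 30; ++i) {
  // Feed the size of the latest encoded frame (a delta frame here).
  dropper.Fill(/*framesize_bytes=*/1250, /*delta_frame=*/true);
  dropper.Leak(/*input_framerate=*/30);
  if (dropper.DropFrame()) {
    // Skip encoding the next captured frame.
  }
}
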
diff --git a/third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc
new file mode 100644
index 0000000000..066103a788
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/frame_dropper_unittest.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/frame_dropper.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const float kTargetBitRateKbps = 300;
+const float kIncomingFrameRate = 30;
+const size_t kFrameSizeBytes = 1250;
+
+const size_t kLargeFrameSizeBytes = 25000;
+
+const bool kIncludeKeyFrame = true;
+const bool kDoNotIncludeKeyFrame = false;
+
+} // namespace
+
+class FrameDropperTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ frame_dropper_.SetRates(kTargetBitRateKbps, kIncomingFrameRate);
+ }
+
+ void OverflowLeakyBucket() {
+ // Overflow bucket in frame dropper.
+ for (int i = 0; i < kIncomingFrameRate; ++i) {
+ frame_dropper_.Fill(kFrameSizeBytes, true);
+ }
+ frame_dropper_.Leak(kIncomingFrameRate);
+ }
+
+ void ValidateNoDropsAtTargetBitrate(int large_frame_size_bytes,
+ int large_frame_rate,
+ bool is_large_frame_delta) {
+ // Smaller frame size is computed to meet `kTargetBitRateKbps`.
+ int small_frame_size_bytes =
+ kFrameSizeBytes -
+ (large_frame_size_bytes * large_frame_rate) / kIncomingFrameRate;
+
+ for (int i = 1; i <= 5 * large_frame_rate; ++i) {
+ // Large frame. First frame is always a key frame.
+ frame_dropper_.Fill(large_frame_size_bytes,
+ (i == 1) ? false : is_large_frame_delta);
+ frame_dropper_.Leak(kIncomingFrameRate);
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+
+ // Smaller frames.
+ for (int j = 1; j < kIncomingFrameRate / large_frame_rate; ++j) {
+ frame_dropper_.Fill(small_frame_size_bytes, true);
+ frame_dropper_.Leak(kIncomingFrameRate);
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+ }
+ }
+ }
+
+ void ValidateThroughputMatchesTargetBitrate(int bitrate_kbps,
+ bool include_keyframe) {
+ int delta_frame_size;
+ int total_bytes = 0;
+
+ if (include_keyframe) {
+ delta_frame_size = ((1000.0 / 8 * bitrate_kbps) - kLargeFrameSizeBytes) /
+ (kIncomingFrameRate - 1);
+ } else {
+ delta_frame_size = bitrate_kbps * 1000.0 / (8 * kIncomingFrameRate);
+ }
+ const int kNumIterations = 1000;
+ for (int i = 1; i <= kNumIterations; ++i) {
+ int j = 0;
+ if (include_keyframe) {
+ if (!frame_dropper_.DropFrame()) {
+ frame_dropper_.Fill(kLargeFrameSizeBytes, false);
+ total_bytes += kLargeFrameSizeBytes;
+ }
+ frame_dropper_.Leak(kIncomingFrameRate);
+ j++;
+ }
+ for (; j < kIncomingFrameRate; ++j) {
+ if (!frame_dropper_.DropFrame()) {
+ frame_dropper_.Fill(delta_frame_size, true);
+ total_bytes += delta_frame_size;
+ }
+ frame_dropper_.Leak(kIncomingFrameRate);
+ }
+ }
+ float throughput_kbps = total_bytes * 8.0 / (1000 * kNumIterations);
+ float deviation_from_target =
+ (throughput_kbps - kTargetBitRateKbps) * 100.0 / kTargetBitRateKbps;
+ if (deviation_from_target < 0) {
+ deviation_from_target = -deviation_from_target;
+ }
+
+ // Variation is < 0.1%
+ EXPECT_LE(deviation_from_target, 0.1);
+ }
+
+ FrameDropper frame_dropper_;
+};
+
+TEST_F(FrameDropperTest, NoDropsWhenDisabled) {
+ frame_dropper_.Enable(false);
+ OverflowLeakyBucket();
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+}
+
+TEST_F(FrameDropperTest, DropsByDefaultWhenBucketOverflows) {
+ OverflowLeakyBucket();
+ EXPECT_TRUE(frame_dropper_.DropFrame());
+}
+
+TEST_F(FrameDropperTest, NoDropsWhenFillRateMatchesLeakRate) {
+ for (int i = 0; i < 5 * kIncomingFrameRate; ++i) {
+ frame_dropper_.Fill(kFrameSizeBytes, true);
+ frame_dropper_.Leak(kIncomingFrameRate);
+ EXPECT_FALSE(frame_dropper_.DropFrame());
+ }
+}
+
+TEST_F(FrameDropperTest, LargeKeyFrames) {
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes, 1, false);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 2, 2, false);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 4, 4, false);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 8, 8, false);
+}
+
+TEST_F(FrameDropperTest, LargeDeltaFrames) {
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes, 1, true);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 2, 2, true);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 4, 4, true);
+ frame_dropper_.Reset();
+ ValidateNoDropsAtTargetBitrate(kLargeFrameSizeBytes / 8, 8, true);
+}
+
+TEST_F(FrameDropperTest, TrafficVolumeAboveAvailableBandwidth) {
+ ValidateThroughputMatchesTargetBitrate(700, kIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(700, kDoNotIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(600, kIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(600, kDoNotIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(500, kIncludeKeyFrame);
+ ValidateThroughputMatchesTargetBitrate(500, kDoNotIncludeKeyFrame);
+}
+
+} // namespace webrtc
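
The test constants above are self-consistent: at kTargetBitRateKbps = 300 and kIncomingFrameRate = 30, the per-frame budget is 300 * 1000 / (8 * 30) = 1250 bytes, i.e. exactly kFrameSizeBytes, and ValidateThroughputMatchesTargetBitrate uses the same formula for delta_frame_size. A quick compile-time check (illustrative):

static_assert(300 * 1000 / (8 * 30) == 1250,
              "300 kbps at 30 fps is 1250 bytes per frame");
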
diff --git a/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc
new file mode 100644
index 0000000000..5978adc3c4
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+
+namespace webrtc {
+
+FramerateControllerDeprecated::FramerateControllerDeprecated(
+ float target_framerate_fps)
+ : min_frame_interval_ms_(0), framerate_estimator_(1000.0, 1000.0) {
+ SetTargetRate(target_framerate_fps);
+}
+
+void FramerateControllerDeprecated::SetTargetRate(float target_framerate_fps) {
+ if (target_framerate_fps_ != target_framerate_fps) {
+ framerate_estimator_.Reset();
+ if (last_timestamp_ms_) {
+ framerate_estimator_.Update(1, *last_timestamp_ms_);
+ }
+
+ const size_t target_frame_interval_ms = 1000 / target_framerate_fps;
+ target_framerate_fps_ = target_framerate_fps;
+ min_frame_interval_ms_ = 85 * target_frame_interval_ms / 100;
+ }
+}
+
+float FramerateControllerDeprecated::GetTargetRate() {
+ return *target_framerate_fps_;
+}
+
+void FramerateControllerDeprecated::Reset() {
+ framerate_estimator_.Reset();
+ last_timestamp_ms_.reset();
+}
+
+bool FramerateControllerDeprecated::DropFrame(uint32_t timestamp_ms) const {
+ if (timestamp_ms < last_timestamp_ms_) {
+    // Timestamp jumps backward. We can't make an adequate drop decision.
+    // Don't drop this frame. Stats will be reset in AddFrame().
+ return false;
+ }
+
+ if (Rate(timestamp_ms).value_or(*target_framerate_fps_) >
+ target_framerate_fps_) {
+ return true;
+ }
+
+ if (last_timestamp_ms_) {
+ const int64_t diff_ms =
+ static_cast<int64_t>(timestamp_ms) - *last_timestamp_ms_;
+ if (diff_ms < min_frame_interval_ms_) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void FramerateControllerDeprecated::AddFrame(uint32_t timestamp_ms) {
+ if (timestamp_ms < last_timestamp_ms_) {
+ // Timestamp jumps backward.
+ Reset();
+ }
+
+ framerate_estimator_.Update(1, timestamp_ms);
+ last_timestamp_ms_ = timestamp_ms;
+}
+
+absl::optional<float> FramerateControllerDeprecated::Rate(
+ uint32_t timestamp_ms) const {
+ return framerate_estimator_.Rate(timestamp_ms);
+}
+
+} // namespace webrtc
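
Worked numbers for the 85% rule in SetTargetRate() above: at a target of 30 fps, target_frame_interval_ms = 1000 / 30 = 33 (integer division) and min_frame_interval_ms = 85 * 33 / 100 = 28, so a frame arriving 1 ms after the previous one is dropped (1 < 28), which is exactly what the DropFrameIfItIsTooCloseToPreviousFrame unit test exercises. A sketch of the arithmetic:

constexpr int kTargetFps = 30;
constexpr int kTargetFrameIntervalMs = 1000 / kTargetFps;               // 33
constexpr int kMinFrameIntervalMs = 85 * kTargetFrameIntervalMs / 100;  // 28
static_assert(kMinFrameIntervalMs == 28, "85% of a 33 ms interval");
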
diff --git a/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h
new file mode 100644
index 0000000000..ca0cbea053
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_FRAMERATE_CONTROLLER_DEPRECATED_H_
+#define MODULES_VIDEO_CODING_UTILITY_FRAMERATE_CONTROLLER_DEPRECATED_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "rtc_base/rate_statistics.h"
+
+namespace webrtc {
+
+// Please use webrtc::FramerateController instead.
+class FramerateControllerDeprecated {
+ public:
+ explicit FramerateControllerDeprecated(float target_framerate_fps);
+
+ void SetTargetRate(float target_framerate_fps);
+ float GetTargetRate();
+
+  // Advises the caller to drop the next frame to reach the target framerate.
+ bool DropFrame(uint32_t timestamp_ms) const;
+
+ void AddFrame(uint32_t timestamp_ms);
+
+ void Reset();
+
+ private:
+ absl::optional<float> Rate(uint32_t timestamp_ms) const;
+
+ absl::optional<float> target_framerate_fps_;
+ absl::optional<uint32_t> last_timestamp_ms_;
+ uint32_t min_frame_interval_ms_;
+ RateStatistics framerate_estimator_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_FRAMERATE_CONTROLLER_DEPRECATED_H_
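
A minimal usage sketch of the interface above, mirroring the pattern in the unit tests below; the 30 fps input and 15 fps target are illustrative:

webrtc::FramerateControllerDeprecated controller(/*target_framerate_fps=*/15);
for (uint32_t frame = 0; frame < 90; ++frame) {
  const uint32_t timestamp_ms = frame * 33;  // ~30 fps input.
  if (controller.DropFrame(timestamp_ms)) {
    continue;  // Skip this frame to approach the target rate.
  }
  controller.AddFrame(timestamp_ms);
  // ... forward the frame to the encoder ...
}
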
diff --git a/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc
new file mode 100644
index 0000000000..eabf0529db
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated_unittest.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/framerate_controller_deprecated.h"
+
+#include <stddef.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(FramerateControllerDeprecated, KeepTargetFramerate) {
+ const float input_framerate_fps = 20;
+ const float target_framerate_fps = 5;
+ const float max_abs_framerate_error_fps = target_framerate_fps * 0.1f;
+ const size_t input_duration_secs = 3;
+ const size_t num_input_frames = input_duration_secs * input_framerate_fps;
+
+ FramerateControllerDeprecated framerate_controller(target_framerate_fps);
+ size_t num_dropped_frames = 0;
+ for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
+ const uint32_t timestamp_ms =
+ static_cast<uint32_t>(1000 * frame_num / input_framerate_fps);
+ if (framerate_controller.DropFrame(timestamp_ms)) {
+ ++num_dropped_frames;
+ } else {
+ framerate_controller.AddFrame(timestamp_ms);
+ }
+ }
+
+ const float output_framerate_fps =
+ static_cast<float>(num_input_frames - num_dropped_frames) /
+ input_duration_secs;
+ EXPECT_NEAR(output_framerate_fps, target_framerate_fps,
+ max_abs_framerate_error_fps);
+}
+
+TEST(FramerateControllerDeprecated, DoNotDropAnyFramesIfTargetEqualsInput) {
+ const float input_framerate_fps = 30;
+ const size_t input_duration_secs = 3;
+ const size_t num_input_frames = input_duration_secs * input_framerate_fps;
+
+ FramerateControllerDeprecated framerate_controller(input_framerate_fps);
+ size_t num_dropped_frames = 0;
+ for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
+ const uint32_t timestamp_ms =
+ static_cast<uint32_t>(1000 * frame_num / input_framerate_fps);
+ if (framerate_controller.DropFrame(timestamp_ms)) {
+ ++num_dropped_frames;
+ } else {
+ framerate_controller.AddFrame(timestamp_ms);
+ }
+ }
+
+ EXPECT_EQ(num_dropped_frames, 0U);
+}
+
+TEST(FramerateControllerDeprecated, DoNotDropFrameWhenTimestampJumpsBackward) {
+ FramerateControllerDeprecated framerate_controller(30);
+ ASSERT_FALSE(framerate_controller.DropFrame(66));
+ framerate_controller.AddFrame(66);
+ EXPECT_FALSE(framerate_controller.DropFrame(33));
+}
+
+TEST(FramerateControllerDeprecated, DropFrameIfItIsTooCloseToPreviousFrame) {
+ FramerateControllerDeprecated framerate_controller(30);
+ ASSERT_FALSE(framerate_controller.DropFrame(33));
+ framerate_controller.AddFrame(33);
+ EXPECT_TRUE(framerate_controller.DropFrame(34));
+}
+
+TEST(FramerateControllerDeprecated, FrameDroppingStartsFromSecondInputFrame) {
+ const float input_framerate_fps = 23;
+ const float target_framerate_fps = 19;
+ const uint32_t input_frame_duration_ms =
+ static_cast<uint32_t>(1000 / input_framerate_fps);
+ FramerateControllerDeprecated framerate_controller(target_framerate_fps);
+ ASSERT_FALSE(framerate_controller.DropFrame(1 * input_frame_duration_ms));
+ framerate_controller.AddFrame(1 * input_frame_duration_ms);
+ EXPECT_TRUE(framerate_controller.DropFrame(2 * input_frame_duration_ms));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h b/third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h
new file mode 100644
index 0000000000..212d381e70
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_defines.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains definitions that are common to the IvfFileReader and
+ * IvfFileWriter classes.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_IVF_DEFINES_H_
+#define MODULES_VIDEO_CODING_UTILITY_IVF_DEFINES_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+constexpr size_t kIvfHeaderSize = 32;
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_IVF_DEFINES_H_
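
For reference, the 32 bytes counted by kIvfHeaderSize are laid out as follows, as implied by the IvfFileReader and IvfFileWriter below (all multi-byte fields little-endian); a sketch, not a normative definition:

#include <cstdint>

struct IvfHeaderLayout {
  uint8_t signature[4];     // Bytes 0-3:   "DKIF".
  uint16_t version;         // Bytes 4-5:   0.
  uint16_t header_size;     // Bytes 6-7:   32.
  uint8_t fourcc[4];        // Bytes 8-11:  "VP80", "VP90", "AV01" or "H264".
  uint16_t width;           // Bytes 12-13: frame width in pixels.
  uint16_t height;          // Bytes 14-15: frame height in pixels.
  uint32_t timebase_rate;   // Bytes 16-19: 1000 (ms) or 90000 (RTP) here.
  uint32_t timebase_scale;  // Bytes 20-23: 1.
  uint32_t num_frames;      // Bytes 24-27: frame count.
  uint32_t reserved;        // Bytes 28-31: 0.
};
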
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc
new file mode 100644
index 0000000000..13092b5e24
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_reader.h"
+
+#include <string>
+#include <vector>
+
+#include "api/video_codecs/video_codec.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/video_coding/utility/ivf_defines.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kIvfFrameHeaderSize = 12;
+constexpr int kCodecTypeBytesCount = 4;
+
+constexpr uint8_t kFileHeaderStart[kCodecTypeBytesCount] = {'D', 'K', 'I', 'F'};
+constexpr uint8_t kVp8Header[kCodecTypeBytesCount] = {'V', 'P', '8', '0'};
+constexpr uint8_t kVp9Header[kCodecTypeBytesCount] = {'V', 'P', '9', '0'};
+constexpr uint8_t kAv1Header[kCodecTypeBytesCount] = {'A', 'V', '0', '1'};
+constexpr uint8_t kH264Header[kCodecTypeBytesCount] = {'H', '2', '6', '4'};
+
+// The RTP standard requires a 90 kHz clock rate.
+constexpr int32_t kRtpClockRateHz = 90000;
+
+} // namespace
+
+std::unique_ptr<IvfFileReader> IvfFileReader::Create(FileWrapper file) {
+ auto reader =
+ std::unique_ptr<IvfFileReader>(new IvfFileReader(std::move(file)));
+ if (!reader->Reset()) {
+ return nullptr;
+ }
+ return reader;
+}
+IvfFileReader::~IvfFileReader() {
+ Close();
+}
+
+bool IvfFileReader::Reset() {
+  // Set error to true during initialization.
+ has_error_ = true;
+ if (!file_.Rewind()) {
+ RTC_LOG(LS_ERROR) << "Failed to rewind IVF file";
+ return false;
+ }
+
+ uint8_t ivf_header[kIvfHeaderSize] = {0};
+ size_t read = file_.Read(&ivf_header, kIvfHeaderSize);
+ if (read != kIvfHeaderSize) {
+ RTC_LOG(LS_ERROR) << "Failed to read IVF header";
+ return false;
+ }
+
+ if (memcmp(&ivf_header[0], kFileHeaderStart, 4) != 0) {
+ RTC_LOG(LS_ERROR) << "File is not in IVF format: DKIF header expected";
+ return false;
+ }
+
+ absl::optional<VideoCodecType> codec_type = ParseCodecType(ivf_header, 8);
+ if (!codec_type) {
+ return false;
+ }
+ codec_type_ = *codec_type;
+
+ width_ = ByteReader<uint16_t>::ReadLittleEndian(&ivf_header[12]);
+ height_ = ByteReader<uint16_t>::ReadLittleEndian(&ivf_header[14]);
+ if (width_ == 0 || height_ == 0) {
+ RTC_LOG(LS_ERROR) << "Invalid IVF header: width or height is 0";
+ return false;
+ }
+
+ time_scale_ = ByteReader<uint32_t>::ReadLittleEndian(&ivf_header[16]);
+ if (time_scale_ == 0) {
+ RTC_LOG(LS_ERROR) << "Invalid IVF header: time scale can't be 0";
+ return false;
+ }
+
+ num_frames_ = static_cast<size_t>(
+ ByteReader<uint32_t>::ReadLittleEndian(&ivf_header[24]));
+ if (num_frames_ <= 0) {
+ RTC_LOG(LS_ERROR) << "Invalid IVF header: number of frames 0 or negative";
+ return false;
+ }
+
+ num_read_frames_ = 0;
+ next_frame_header_ = ReadNextFrameHeader();
+ if (!next_frame_header_) {
+ RTC_LOG(LS_ERROR) << "Failed to read 1st frame header";
+ return false;
+ }
+  // Initialization succeeded: reset error.
+ has_error_ = false;
+
+ const char* codec_name = CodecTypeToPayloadString(codec_type_);
+ RTC_LOG(LS_INFO) << "Opened IVF file with codec data of type " << codec_name
+ << " at resolution " << width_ << " x " << height_
+ << ", using " << time_scale_ << "Hz clock resolution.";
+
+ return true;
+}
+
+absl::optional<EncodedImage> IvfFileReader::NextFrame() {
+ if (has_error_ || !HasMoreFrames()) {
+ return absl::nullopt;
+ }
+
+ rtc::scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create();
+ std::vector<size_t> layer_sizes;
+  // next_frame_header_ has to be present given the way it was loaded. If it
+  // is missing, it means there is a bug in error handling.
+ RTC_DCHECK(next_frame_header_);
+ int64_t current_timestamp = next_frame_header_->timestamp;
+ // The first frame from the file should be marked as Key frame.
+ bool is_first_frame = num_read_frames_ == 0;
+ while (next_frame_header_ &&
+ current_timestamp == next_frame_header_->timestamp) {
+ // Resize payload to fit next spatial layer.
+ size_t current_layer_size = next_frame_header_->frame_size;
+ size_t current_layer_start_pos = payload->size();
+ payload->Realloc(payload->size() + current_layer_size);
+ layer_sizes.push_back(current_layer_size);
+
+ // Read next layer into payload
+ size_t read = file_.Read(&payload->data()[current_layer_start_pos],
+ current_layer_size);
+ if (read != current_layer_size) {
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": failed to read frame payload";
+ has_error_ = true;
+ return absl::nullopt;
+ }
+ num_read_frames_++;
+
+ current_timestamp = next_frame_header_->timestamp;
+ next_frame_header_ = ReadNextFrameHeader();
+ }
+ if (!next_frame_header_) {
+    // If EOF was reached, we need to check that all frames were read.
+ if (!has_error_ && num_read_frames_ != num_frames_) {
+ RTC_LOG(LS_ERROR) << "Unexpected EOF";
+ has_error_ = true;
+ return absl::nullopt;
+ }
+ }
+
+ EncodedImage image;
+ image.capture_time_ms_ = current_timestamp;
+ image.SetTimestamp(
+ static_cast<uint32_t>(current_timestamp * kRtpClockRateHz / time_scale_));
+ image.SetEncodedData(payload);
+ image.SetSpatialIndex(static_cast<int>(layer_sizes.size()) - 1);
+ for (size_t i = 0; i < layer_sizes.size(); ++i) {
+ image.SetSpatialLayerFrameSize(static_cast<int>(i), layer_sizes[i]);
+ }
+ if (is_first_frame) {
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ }
+
+ return image;
+}
+
+bool IvfFileReader::Close() {
+ if (!file_.is_open())
+ return false;
+
+ file_.Close();
+ return true;
+}
+
+absl::optional<VideoCodecType> IvfFileReader::ParseCodecType(uint8_t* buffer,
+ size_t start_pos) {
+ if (memcmp(&buffer[start_pos], kVp8Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecVP8;
+ }
+ if (memcmp(&buffer[start_pos], kVp9Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecVP9;
+ }
+ if (memcmp(&buffer[start_pos], kAv1Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecAV1;
+ }
+ if (memcmp(&buffer[start_pos], kH264Header, kCodecTypeBytesCount) == 0) {
+ return VideoCodecType::kVideoCodecH264;
+ }
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Unknown codec type: "
+ << std::string(
+ reinterpret_cast<char const*>(&buffer[start_pos]),
+ kCodecTypeBytesCount);
+ return absl::nullopt;
+}
+
+absl::optional<IvfFileReader::FrameHeader>
+IvfFileReader::ReadNextFrameHeader() {
+ uint8_t ivf_frame_header[kIvfFrameHeaderSize] = {0};
+ size_t read = file_.Read(&ivf_frame_header, kIvfFrameHeaderSize);
+ if (read != kIvfFrameHeaderSize) {
+ if (read != 0 || !file_.ReadEof()) {
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": failed to read IVF frame header";
+ }
+ return absl::nullopt;
+ }
+ FrameHeader header;
+ header.frame_size = static_cast<size_t>(
+ ByteReader<uint32_t>::ReadLittleEndian(&ivf_frame_header[0]));
+ header.timestamp =
+ ByteReader<uint64_t>::ReadLittleEndian(&ivf_frame_header[4]);
+
+ if (header.frame_size == 0) {
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": invalid frame size";
+ return absl::nullopt;
+ }
+
+ if (header.timestamp < 0) {
+ has_error_ = true;
+ RTC_LOG(LS_ERROR) << "Frame #" << num_read_frames_
+ << ": negative timestamp";
+ return absl::nullopt;
+ }
+
+ return header;
+}
+
+} // namespace webrtc
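
NextFrame() above rescales file timestamps into the 90 kHz RTP domain via timestamp * kRtpClockRateHz / time_scale_. For a file written with millisecond capture times (time_scale 1000) that is a factor of 90, which is what the reader unit test expects; for a 90 kHz file the value passes through unchanged. An illustrative check:

static_assert(5 * 90000 / 1000 == 450, "5 ms maps to RTP timestamp 450");
static_assert(450 * 90000 / 90000 == 450, "90 kHz timestamps pass through");
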
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h
new file mode 100644
index 0000000000..db4fc25575
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_IVF_FILE_READER_H_
+#define MODULES_VIDEO_CODING_UTILITY_IVF_FILE_READER_H_
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+
+class IvfFileReader {
+ public:
+  // Creates IvfFileReader. Returns nullptr if an error occurred.
+ static std::unique_ptr<IvfFileReader> Create(FileWrapper file);
+ ~IvfFileReader();
+
+ IvfFileReader(const IvfFileReader&) = delete;
+ IvfFileReader& operator=(const IvfFileReader&) = delete;
+
+  // Reinitializes the reader. Returns false if any error occurred.
+ bool Reset();
+
+  // Returns the codec type that was used to create this IVF file and that
+  // should be used to decode EncodedImages from this file.
+  VideoCodecType GetVideoCodecType() const { return codec_type_; }
+  // Returns the number of frames in this file.
+  size_t GetFramesCount() const { return num_frames_; }
+
+  // Returns the next frame or absl::nullopt if any error occurred. Always
+  // returns absl::nullopt after the first error was encountered.
+ absl::optional<EncodedImage> NextFrame();
+ bool HasMoreFrames() const { return num_read_frames_ < num_frames_; }
+ bool HasError() const { return has_error_; }
+
+ uint16_t GetFrameWidth() const { return width_; }
+ uint16_t GetFrameHeight() const { return height_; }
+
+ bool Close();
+
+ private:
+ struct FrameHeader {
+ size_t frame_size;
+ int64_t timestamp;
+ };
+
+ explicit IvfFileReader(FileWrapper file) : file_(std::move(file)) {}
+
+  // Parses the codec type from the specified position in the buffer. The
+  // codec type occupies kCodecTypeBytesCount bytes, and the caller has to
+  // ensure that the buffer won't be overrun.
+ absl::optional<VideoCodecType> ParseCodecType(uint8_t* buffer,
+ size_t start_pos);
+ absl::optional<FrameHeader> ReadNextFrameHeader();
+
+ VideoCodecType codec_type_;
+ size_t num_frames_;
+ size_t num_read_frames_;
+ uint16_t width_;
+ uint16_t height_;
+ uint32_t time_scale_;
+ FileWrapper file_;
+
+ absl::optional<FrameHeader> next_frame_header_;
+ bool has_error_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_IVF_FILE_READER_H_
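
A minimal read-loop sketch for the interface above (hypothetical path, error handling trimmed):

#include "modules/video_coding/utility/ivf_file_reader.h"

void ReadAllFrames() {
  auto reader = webrtc::IvfFileReader::Create(
      webrtc::FileWrapper::OpenReadOnly("/tmp/input.ivf"));
  if (!reader) return;
  while (reader->HasMoreFrames()) {
    absl::optional<webrtc::EncodedImage> frame = reader->NextFrame();
    if (!frame) break;  // absl::nullopt on error.
    // ... feed *frame to a decoder matching reader->GetVideoCodecType() ...
  }
  reader->Close();
}
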
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc
new file mode 100644
index 0000000000..c9cf14674b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader_unittest.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_reader.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+#include <memory>
+#include <string>
+
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kWidth = 320;
+constexpr int kHeight = 240;
+constexpr int kNumFrames = 3;
+constexpr uint8_t kDummyPayload[4] = {'0', '1', '2', '3'};
+
+} // namespace
+
+class IvfFileReaderTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ file_name_ =
+ webrtc::test::TempFilename(webrtc::test::OutputPath(), "test_file.ivf");
+ }
+ void TearDown() override { webrtc::test::RemoveFile(file_name_); }
+
+ bool WriteDummyTestFrames(IvfFileWriter* file_writer,
+ VideoCodecType codec_type,
+ int width,
+ int height,
+ int num_frames,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ EncodedImage frame;
+ frame.SetSpatialIndex(spatial_layers_count);
+ rtc::scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create(
+ sizeof(kDummyPayload) * spatial_layers_count);
+ for (int i = 0; i < spatial_layers_count; ++i) {
+ memcpy(&payload->data()[i * sizeof(kDummyPayload)], kDummyPayload,
+ sizeof(kDummyPayload));
+ frame.SetSpatialLayerFrameSize(i, sizeof(kDummyPayload));
+ }
+ frame.SetEncodedData(payload);
+ frame._encodedWidth = width;
+ frame._encodedHeight = height;
+ for (int i = 1; i <= num_frames; ++i) {
+ if (use_capture_tims_ms) {
+ frame.capture_time_ms_ = i;
+ } else {
+ frame.SetTimestamp(i);
+ }
+ if (!file_writer->WriteFrame(frame, codec_type))
+ return false;
+ }
+ return true;
+ }
+
+ void CreateTestFile(VideoCodecType codec_type,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ std::unique_ptr<IvfFileWriter> file_writer =
+ IvfFileWriter::Wrap(FileWrapper::OpenWriteOnly(file_name_), 0);
+ ASSERT_TRUE(file_writer.get());
+ ASSERT_TRUE(WriteDummyTestFrames(file_writer.get(), codec_type, kWidth,
+ kHeight, kNumFrames, use_capture_tims_ms,
+ spatial_layers_count));
+ ASSERT_TRUE(file_writer->Close());
+ }
+
+ void ValidateFrame(absl::optional<EncodedImage> frame,
+ int frame_index,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ ASSERT_TRUE(frame);
+ EXPECT_EQ(frame->SpatialIndex(), spatial_layers_count - 1);
+ if (use_capture_tims_ms) {
+ EXPECT_EQ(frame->capture_time_ms_, static_cast<int64_t>(frame_index));
+ EXPECT_EQ(frame->Timestamp(), static_cast<int64_t>(90 * frame_index));
+ } else {
+ EXPECT_EQ(frame->Timestamp(), static_cast<int64_t>(frame_index));
+ }
+ ASSERT_EQ(frame->size(), sizeof(kDummyPayload) * spatial_layers_count);
+ for (int i = 0; i < spatial_layers_count; ++i) {
+ EXPECT_EQ(memcmp(&frame->data()[i * sizeof(kDummyPayload)], kDummyPayload,
+ sizeof(kDummyPayload)),
+ 0)
+ << std::string(reinterpret_cast<char const*>(
+ &frame->data()[i * sizeof(kDummyPayload)]),
+ sizeof(kDummyPayload));
+ }
+ }
+
+ void ValidateContent(VideoCodecType codec_type,
+ bool use_capture_tims_ms,
+ int spatial_layers_count) {
+ std::unique_ptr<IvfFileReader> reader =
+ IvfFileReader::Create(FileWrapper::OpenReadOnly(file_name_));
+ ASSERT_TRUE(reader.get());
+ EXPECT_EQ(reader->GetVideoCodecType(), codec_type);
+ EXPECT_EQ(reader->GetFramesCount(),
+ spatial_layers_count * static_cast<size_t>(kNumFrames));
+ for (int i = 1; i <= kNumFrames; ++i) {
+ ASSERT_TRUE(reader->HasMoreFrames());
+ ValidateFrame(reader->NextFrame(), i, use_capture_tims_ms,
+ spatial_layers_count);
+ EXPECT_FALSE(reader->HasError());
+ }
+ EXPECT_FALSE(reader->HasMoreFrames());
+ EXPECT_FALSE(reader->NextFrame());
+ EXPECT_FALSE(reader->HasError());
+ ASSERT_TRUE(reader->Close());
+ }
+
+ std::string file_name_;
+};
+
+TEST_F(IvfFileReaderTest, BasicVp8FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP8, false, 1);
+ ValidateContent(kVideoCodecVP8, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicVP8FileMsTimestamp) {
+ CreateTestFile(kVideoCodecVP8, true, 1);
+ ValidateContent(kVideoCodecVP8, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicVP9FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP9, false, 1);
+ ValidateContent(kVideoCodecVP9, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicVP9FileMsTimestamp) {
+ CreateTestFile(kVideoCodecVP9, true, 1);
+ ValidateContent(kVideoCodecVP9, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicAv1FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecAV1, false, 1);
+ ValidateContent(kVideoCodecAV1, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicAv1FileMsTimestamp) {
+ CreateTestFile(kVideoCodecAV1, true, 1);
+ ValidateContent(kVideoCodecAV1, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicH264FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecH264, false, 1);
+ ValidateContent(kVideoCodecH264, false, 1);
+}
+
+TEST_F(IvfFileReaderTest, BasicH264FileMsTimestamp) {
+ CreateTestFile(kVideoCodecH264, true, 1);
+ ValidateContent(kVideoCodecH264, true, 1);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerVp8FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP8, false, 3);
+ ValidateContent(kVideoCodecVP8, false, 3);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerVP9FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecVP9, false, 3);
+ ValidateContent(kVideoCodecVP9, false, 3);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerAv1FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecAV1, false, 3);
+ ValidateContent(kVideoCodecAV1, false, 3);
+}
+
+TEST_F(IvfFileReaderTest, MultilayerH264FileNtpTimestamp) {
+ CreateTestFile(kVideoCodecH264, false, 3);
+ ValidateContent(kVideoCodecH264, false, 3);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc
new file mode 100644
index 0000000000..5b27ef3ef7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+#include <utility>
+
+#include "api/video_codecs/video_codec.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/video_coding/utility/ivf_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// TODO(palmkvist): make logging more informative in the absence of a file name
+// (or get one)
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultWidth = 1280;
+constexpr int kDefaultHeight = 720;
+} // namespace
+
+IvfFileWriter::IvfFileWriter(FileWrapper file, size_t byte_limit)
+ : codec_type_(kVideoCodecGeneric),
+ bytes_written_(0),
+ byte_limit_(byte_limit),
+ num_frames_(0),
+ width_(0),
+ height_(0),
+ last_timestamp_(-1),
+ using_capture_timestamps_(false),
+ file_(std::move(file)) {
+ RTC_DCHECK(byte_limit == 0 || webrtc::kIvfHeaderSize <= byte_limit)
+ << "The byte_limit is too low, not even the header will fit.";
+}
+
+IvfFileWriter::~IvfFileWriter() {
+ Close();
+}
+
+std::unique_ptr<IvfFileWriter> IvfFileWriter::Wrap(FileWrapper file,
+ size_t byte_limit) {
+ return std::unique_ptr<IvfFileWriter>(
+ new IvfFileWriter(std::move(file), byte_limit));
+}
+
+bool IvfFileWriter::WriteHeader() {
+ if (!file_.Rewind()) {
+ RTC_LOG(LS_WARNING) << "Unable to rewind ivf output file.";
+ return false;
+ }
+
+ uint8_t ivf_header[webrtc::kIvfHeaderSize] = {0};
+ ivf_header[0] = 'D';
+ ivf_header[1] = 'K';
+ ivf_header[2] = 'I';
+ ivf_header[3] = 'F';
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[4], 0); // Version.
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32); // Header size.
+
+ switch (codec_type_) {
+ case kVideoCodecVP8:
+ ivf_header[8] = 'V';
+ ivf_header[9] = 'P';
+ ivf_header[10] = '8';
+ ivf_header[11] = '0';
+ break;
+ case kVideoCodecVP9:
+ ivf_header[8] = 'V';
+ ivf_header[9] = 'P';
+ ivf_header[10] = '9';
+ ivf_header[11] = '0';
+ break;
+ case kVideoCodecAV1:
+ ivf_header[8] = 'A';
+ ivf_header[9] = 'V';
+ ivf_header[10] = '0';
+ ivf_header[11] = '1';
+ break;
+ case kVideoCodecH264:
+ ivf_header[8] = 'H';
+ ivf_header[9] = '2';
+ ivf_header[10] = '6';
+ ivf_header[11] = '4';
+ break;
+ default:
+      // For an unknown codec type, use the '****' code. You can specify the
+      // actual payload format when playing the video: ffplay -f H263 file.ivf
+ ivf_header[8] = '*';
+ ivf_header[9] = '*';
+ ivf_header[10] = '*';
+ ivf_header[11] = '*';
+ break;
+ }
+
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[12], width_);
+ ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[14], height_);
+ // Render timestamps are in ms (1/1000 scale), while RTP timestamps use a
+ // 90kHz clock.
+ ByteWriter<uint32_t>::WriteLittleEndian(
+ &ivf_header[16], using_capture_timestamps_ ? 1000 : 90000);
+ ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[20], 1);
+ ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[24],
+ static_cast<uint32_t>(num_frames_));
+ ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[28], 0); // Reserved.
+
+ if (!file_.Write(ivf_header, webrtc::kIvfHeaderSize)) {
+ RTC_LOG(LS_ERROR) << "Unable to write IVF header for ivf output file.";
+ return false;
+ }
+
+ if (bytes_written_ < webrtc::kIvfHeaderSize) {
+ bytes_written_ = webrtc::kIvfHeaderSize;
+ }
+
+ return true;
+}
+
+bool IvfFileWriter::InitFromFirstFrame(const EncodedImage& encoded_image,
+ VideoCodecType codec_type) {
+ if (encoded_image._encodedWidth == 0 || encoded_image._encodedHeight == 0) {
+ width_ = kDefaultWidth;
+ height_ = kDefaultHeight;
+ } else {
+ width_ = encoded_image._encodedWidth;
+ height_ = encoded_image._encodedHeight;
+ }
+
+ using_capture_timestamps_ = encoded_image.Timestamp() == 0;
+
+ codec_type_ = codec_type;
+
+ if (!WriteHeader())
+ return false;
+
+ const char* codec_name = CodecTypeToPayloadString(codec_type_);
+ RTC_LOG(LS_WARNING) << "Created IVF file for codec data of type "
+ << codec_name << " at resolution " << width_ << " x "
+ << height_ << ", using "
+ << (using_capture_timestamps_ ? "1" : "90")
+ << "kHz clock resolution.";
+ return true;
+}
+
+bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
+ VideoCodecType codec_type) {
+ if (!file_.is_open())
+ return false;
+
+ if (num_frames_ == 0 && !InitFromFirstFrame(encoded_image, codec_type))
+ return false;
+ RTC_DCHECK_EQ(codec_type_, codec_type);
+
+ if ((encoded_image._encodedWidth > 0 || encoded_image._encodedHeight > 0) &&
+ (encoded_image._encodedHeight != height_ ||
+ encoded_image._encodedWidth != width_)) {
+ RTC_LOG(LS_WARNING)
+ << "Incoming frame has resolution different from previous: (" << width_
+ << "x" << height_ << ") -> (" << encoded_image._encodedWidth << "x"
+ << encoded_image._encodedHeight << ")";
+ }
+
+ int64_t timestamp = using_capture_timestamps_
+ ? encoded_image.capture_time_ms_
+ : wrap_handler_.Unwrap(encoded_image.Timestamp());
+ if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
+ RTC_LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_
+ << " -> " << timestamp;
+ }
+ last_timestamp_ = timestamp;
+
+ bool written_frames = false;
+ size_t max_sl_index = encoded_image.SpatialIndex().value_or(0);
+ const uint8_t* data = encoded_image.data();
+ for (size_t sl_idx = 0; sl_idx <= max_sl_index; ++sl_idx) {
+ size_t cur_size = encoded_image.SpatialLayerFrameSize(sl_idx).value_or(0);
+ if (cur_size > 0) {
+ written_frames = true;
+ if (!WriteOneSpatialLayer(timestamp, data, cur_size)) {
+ return false;
+ }
+ data += cur_size;
+ }
+ }
+
+  // If the frame has only one spatial layer, it won't have any per-layer
+  // sizes set, so this case has to be handled separately.
+ if (!written_frames) {
+ return WriteOneSpatialLayer(timestamp, data, encoded_image.size());
+ } else {
+ return true;
+ }
+}
+
+bool IvfFileWriter::WriteOneSpatialLayer(int64_t timestamp,
+ const uint8_t* data,
+ size_t size) {
+ const size_t kFrameHeaderSize = 12;
+ if (byte_limit_ != 0 &&
+ bytes_written_ + kFrameHeaderSize + size > byte_limit_) {
+ RTC_LOG(LS_WARNING) << "Closing IVF file due to reaching size limit: "
+ << byte_limit_ << " bytes.";
+ Close();
+ return false;
+ }
+ uint8_t frame_header[kFrameHeaderSize] = {};
+ ByteWriter<uint32_t>::WriteLittleEndian(&frame_header[0],
+ static_cast<uint32_t>(size));
+ ByteWriter<uint64_t>::WriteLittleEndian(&frame_header[4], timestamp);
+ if (!file_.Write(frame_header, kFrameHeaderSize) ||
+ !file_.Write(data, size)) {
+ RTC_LOG(LS_ERROR) << "Unable to write frame to file.";
+ return false;
+ }
+
+ bytes_written_ += kFrameHeaderSize + size;
+
+ ++num_frames_;
+ return true;
+}
+
+bool IvfFileWriter::Close() {
+ if (!file_.is_open())
+ return false;
+
+ if (num_frames_ == 0) {
+ file_.Close();
+ return true;
+ }
+
+ bool ret = WriteHeader();
+ file_.Close();
+ return ret;
+}
+
+} // namespace webrtc
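
Two behaviors of WriteFrame() above are worth spelling out: the timebase is chosen from the first frame (a zero RTP timestamp selects the 1000 Hz capture-time clock), and a frame with N spatial layers is written as N consecutive IVF records sharing one timestamp, which is how IvfFileReader::NextFrame() re-joins them. A layout sketch (sizes illustrative):

// One two-layer frame at timestamp T, as emitted by WriteOneSpatialLayer():
//   [12-byte header: size=S0, timestamp=T][S0 bytes of spatial layer 0]
//   [12-byte header: size=S1, timestamp=T][S1 bytes of spatial layer 1]
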
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h
new file mode 100644
index 0000000000..ec8a7bf9e1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
+#define MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_codec_type.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+
+class IvfFileWriter {
+ public:
+ // Takes ownership of the file, which will be closed either through
+ // Close or ~IvfFileWriter. If writing a frame would take the file above the
+ // `byte_limit` the file will be closed, the write (and all future writes)
+ // will fail. A `byte_limit` of 0 is equivalent to no limit.
+ static std::unique_ptr<IvfFileWriter> Wrap(FileWrapper file,
+ size_t byte_limit);
+ ~IvfFileWriter();
+
+ IvfFileWriter(const IvfFileWriter&) = delete;
+ IvfFileWriter& operator=(const IvfFileWriter&) = delete;
+
+ bool WriteFrame(const EncodedImage& encoded_image, VideoCodecType codec_type);
+ bool Close();
+
+ private:
+ explicit IvfFileWriter(FileWrapper file, size_t byte_limit);
+
+ bool WriteHeader();
+ bool InitFromFirstFrame(const EncodedImage& encoded_image,
+ VideoCodecType codec_type);
+ bool WriteOneSpatialLayer(int64_t timestamp,
+ const uint8_t* data,
+ size_t size);
+
+ VideoCodecType codec_type_;
+ size_t bytes_written_;
+ size_t byte_limit_;
+ size_t num_frames_;
+ uint16_t width_;
+ uint16_t height_;
+ int64_t last_timestamp_;
+ bool using_capture_timestamps_;
+ RtpTimestampUnwrapper wrap_handler_;
+ FileWrapper file_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
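
A minimal write-side sketch of the interface above (hypothetical path; a byte_limit of 0 means no size cap):

#include "modules/video_coding/utility/ivf_file_writer.h"

void DumpFrame(const webrtc::EncodedImage& image) {
  auto writer = webrtc::IvfFileWriter::Wrap(
      webrtc::FileWrapper::OpenWriteOnly("/tmp/output.ivf"), /*byte_limit=*/0);
  if (writer && writer->WriteFrame(image, webrtc::kVideoCodecVP8)) {
    writer->Close();
  }
}
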
diff --git a/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
new file mode 100644
index 0000000000..c5d30a1286
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+#include <string.h>
+
+#include <memory>
+#include <string>
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+namespace {
+static const int kHeaderSize = 32;
+static const int kFrameHeaderSize = 12;
+static uint8_t dummy_payload[4] = {0, 1, 2, 3};
+// Default values used when the width and height of the EncodedImage are 0;
+// the values are copied from ivf_file_writer.cc.
+constexpr int kDefaultWidth = 1280;
+constexpr int kDefaultHeight = 720;
+} // namespace
+
+class IvfFileWriterTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ file_name_ =
+ webrtc::test::TempFilename(webrtc::test::OutputPath(), "test_file");
+ }
+ void TearDown() override { webrtc::test::RemoveFile(file_name_); }
+
+ bool WriteDummyTestFrames(VideoCodecType codec_type,
+ int width,
+ int height,
+ int num_frames,
+ bool use_capture_tims_ms) {
+ EncodedImage frame;
+ frame.SetEncodedData(
+ EncodedImageBuffer::Create(dummy_payload, sizeof(dummy_payload)));
+ frame._encodedWidth = width;
+ frame._encodedHeight = height;
+ for (int i = 1; i <= num_frames; ++i) {
+ frame.set_size(i % sizeof(dummy_payload));
+ if (use_capture_tims_ms) {
+ frame.capture_time_ms_ = i;
+ } else {
+ frame.SetTimestamp(i);
+ }
+ if (!file_writer_->WriteFrame(frame, codec_type))
+ return false;
+ }
+ return true;
+ }
+
+ void VerifyIvfHeader(FileWrapper* file,
+ const uint8_t fourcc[4],
+ int width,
+ int height,
+ uint32_t num_frames,
+ bool use_capture_tims_ms) {
+ ASSERT_TRUE(file->is_open());
+ uint8_t data[kHeaderSize];
+ ASSERT_EQ(static_cast<size_t>(kHeaderSize), file->Read(data, kHeaderSize));
+
+ uint8_t dkif[4] = {'D', 'K', 'I', 'F'};
+ EXPECT_EQ(0, memcmp(dkif, data, 4));
+ EXPECT_EQ(0u, ByteReader<uint16_t>::ReadLittleEndian(&data[4]));
+ EXPECT_EQ(32u, ByteReader<uint16_t>::ReadLittleEndian(&data[6]));
+ EXPECT_EQ(0, memcmp(fourcc, &data[8], 4));
+ EXPECT_EQ(width, ByteReader<uint16_t>::ReadLittleEndian(&data[12]));
+ EXPECT_EQ(height, ByteReader<uint16_t>::ReadLittleEndian(&data[14]));
+ EXPECT_EQ(use_capture_tims_ms ? 1000u : 90000u,
+ ByteReader<uint32_t>::ReadLittleEndian(&data[16]));
+ EXPECT_EQ(1u, ByteReader<uint32_t>::ReadLittleEndian(&data[20]));
+ EXPECT_EQ(num_frames, ByteReader<uint32_t>::ReadLittleEndian(&data[24]));
+ EXPECT_EQ(0u, ByteReader<uint32_t>::ReadLittleEndian(&data[28]));
+ }
+
+ void VerifyDummyTestFrames(FileWrapper* file, uint32_t num_frames) {
+ const int kMaxFrameSize = 4;
+ for (uint32_t i = 1; i <= num_frames; ++i) {
+ uint8_t frame_header[kFrameHeaderSize];
+ ASSERT_EQ(static_cast<unsigned int>(kFrameHeaderSize),
+ file->Read(frame_header, kFrameHeaderSize));
+ uint32_t frame_length =
+ ByteReader<uint32_t>::ReadLittleEndian(&frame_header[0]);
+ EXPECT_EQ(i % 4, frame_length);
+ uint64_t timestamp =
+ ByteReader<uint64_t>::ReadLittleEndian(&frame_header[4]);
+ EXPECT_EQ(i, timestamp);
+
+ uint8_t data[kMaxFrameSize] = {};
+ ASSERT_EQ(frame_length,
+ static_cast<uint32_t>(file->Read(data, frame_length)));
+ EXPECT_EQ(0, memcmp(data, dummy_payload, frame_length));
+ }
+ }
+
+ void RunBasicFileStructureTest(VideoCodecType codec_type,
+ const uint8_t fourcc[4],
+ bool use_capture_tims_ms) {
+ file_writer_ =
+ IvfFileWriter::Wrap(FileWrapper::OpenWriteOnly(file_name_), 0);
+ ASSERT_TRUE(file_writer_.get());
+ const int kWidth = 320;
+ const int kHeight = 240;
+ const int kNumFrames = 257;
+ ASSERT_TRUE(WriteDummyTestFrames(codec_type, kWidth, kHeight, kNumFrames,
+ use_capture_tims_ms));
+ EXPECT_TRUE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ VerifyIvfHeader(&out_file, fourcc, kWidth, kHeight, kNumFrames,
+ use_capture_tims_ms);
+ VerifyDummyTestFrames(&out_file, kNumFrames);
+
+ out_file.Close();
+ }
+
+ std::string file_name_;
+ std::unique_ptr<IvfFileWriter> file_writer_;
+};
+
+TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP8, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP8, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP9, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
+ RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicAv1FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'A', 'V', '0', '1'};
+ RunBasicFileStructureTest(kVideoCodecAV1, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicAv1FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'A', 'V', '0', '1'};
+ RunBasicFileStructureTest(kVideoCodecAV1, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
+ const uint8_t fourcc[4] = {'H', '2', '6', '4'};
+ RunBasicFileStructureTest(kVideoCodecH264, fourcc, false);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
+ const uint8_t fourcc[4] = {'H', '2', '6', '4'};
+ RunBasicFileStructureTest(kVideoCodecH264, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, WritesBasicUnknownCodecFileMsTimestamp) {
+ const uint8_t fourcc[4] = {'*', '*', '*', '*'};
+ RunBasicFileStructureTest(kVideoCodecGeneric, fourcc, true);
+}
+
+TEST_F(IvfFileWriterTest, ClosesWhenReachesLimit) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 320;
+ const int kHeight = 240;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ VerifyIvfHeader(&out_file, fourcc, kWidth, kHeight, kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenWidthAndHeightAreZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 0;
+ const int kHeight = 0;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ // When the width and height are zero, we should expect the width and height
+ // in IvfHeader to be kDefaultWidth and kDefaultHeight instead of kWidth and
+ // kHeight.
+ VerifyIvfHeader(&out_file, fourcc, kDefaultWidth, kDefaultHeight,
+ kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenOnlyWidthIsZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 0;
+ const int kHeight = 360;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ // When the width and height are zero, we should expect the width and height
+ // in IvfHeader to be kDefaultWidth and kDefaultHeight instead of kWidth and
+ // kHeight.
+ VerifyIvfHeader(&out_file, fourcc, kDefaultWidth, kDefaultHeight,
+ kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, UseDefaultValueWhenOnlyHeightIsZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 240;
+ const int kHeight = 0;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+  // When the height is zero, we should expect the width and height in
+  // IvfHeader to fall back to kDefaultWidth and kDefaultHeight instead of
+  // kWidth and kHeight.
+ VerifyIvfHeader(&out_file, fourcc, kDefaultWidth, kDefaultHeight,
+ kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+TEST_F(IvfFileWriterTest, DoesNotUseDefaultValueWhenHeightAndWidthAreNotZero) {
+ const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
+ const int kWidth = 360;
+ const int kHeight = 240;
+ const int kNumFramesToWrite = 2;
+ const int kNumFramesToFit = 1;
+
+ file_writer_ = IvfFileWriter::Wrap(
+ FileWrapper::OpenWriteOnly(file_name_),
+ kHeaderSize +
+ kNumFramesToFit * (kFrameHeaderSize + sizeof(dummy_payload)));
+ ASSERT_TRUE(file_writer_.get());
+
+ ASSERT_FALSE(WriteDummyTestFrames(kVideoCodecVP8, kWidth, kHeight,
+ kNumFramesToWrite, true));
+ ASSERT_FALSE(file_writer_->Close());
+
+ FileWrapper out_file = FileWrapper::OpenReadOnly(file_name_);
+ VerifyIvfHeader(&out_file, fourcc, kWidth, kHeight, kNumFramesToFit, true);
+ VerifyDummyTestFrames(&out_file, kNumFramesToFit);
+
+ out_file.Close();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc
new file mode 100644
index 0000000000..18f225447d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/qp_parser.h"
+
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+
+namespace webrtc {
+
+absl::optional<uint32_t> QpParser::Parse(VideoCodecType codec_type,
+ size_t spatial_idx,
+ const uint8_t* frame_data,
+ size_t frame_size) {
+ if (frame_data == nullptr || frame_size == 0 ||
+ spatial_idx >= kMaxSimulcastStreams) {
+ return absl::nullopt;
+ }
+
+ if (codec_type == kVideoCodecVP8) {
+ int qp = -1;
+ if (vp8::GetQp(frame_data, frame_size, &qp)) {
+ return qp;
+ }
+ } else if (codec_type == kVideoCodecVP9) {
+ int qp = -1;
+ if (vp9::GetQp(frame_data, frame_size, &qp)) {
+ return qp;
+ }
+ } else if (codec_type == kVideoCodecH264) {
+ return h264_parsers_[spatial_idx].Parse(frame_data, frame_size);
+ }
+
+ return absl::nullopt;
+}
+
+absl::optional<uint32_t> QpParser::H264QpParser::Parse(
+ const uint8_t* frame_data,
+ size_t frame_size) {
+ MutexLock lock(&mutex_);
+ bitstream_parser_.ParseBitstream(
+ rtc::ArrayView<const uint8_t>(frame_data, frame_size));
+ return bitstream_parser_.GetLastSliceQp();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/qp_parser.h b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.h
new file mode 100644
index 0000000000..f132ff9337
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/qp_parser.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
+#define MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
+
+#include "absl/types/optional.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_codec_type.h"
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+class QpParser {
+ public:
+ absl::optional<uint32_t> Parse(VideoCodecType codec_type,
+ size_t spatial_idx,
+ const uint8_t* frame_data,
+ size_t frame_size);
+
+ private:
+  // A thread-safe wrapper for the H264 bitstream parser.
+ class H264QpParser {
+ public:
+ absl::optional<uint32_t> Parse(const uint8_t* frame_data,
+ size_t frame_size);
+
+ private:
+ Mutex mutex_;
+ H264BitstreamParser bitstream_parser_ RTC_GUARDED_BY(mutex_);
+ };
+
+ H264QpParser h264_parsers_[kMaxSimulcastStreams];
+};
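+
+// Usage sketch (illustrative only; `frame` stands in for some encoded frame
+// buffer and is not defined in this file):
+//
+//   QpParser parser;
+//   absl::optional<uint32_t> qp = parser.Parse(
+//       kVideoCodecVP8, /*spatial_idx=*/0, frame.data(), frame.size());
+//   if (qp.has_value()) {
+//     // e.g. feed *qp into quality scaling logic.
+//   }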
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc
new file mode 100644
index 0000000000..1131288f26
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/qp_parser_unittest.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/qp_parser.h"
+
+#include <stddef.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+// ffmpeg -s 16x16 -f rawvideo -pix_fmt rgb24 -r 30 -i /dev/zero -c:v libvpx
+// -qmin 20 -qmax 20 -crf 20 -frames:v 1 -y out.ivf
+const uint8_t kCodedFrameVp8Qp25[] = {
+ 0x10, 0x02, 0x00, 0x9d, 0x01, 0x2a, 0x10, 0x00, 0x10, 0x00,
+ 0x02, 0x47, 0x08, 0x85, 0x85, 0x88, 0x85, 0x84, 0x88, 0x0c,
+ 0x82, 0x00, 0x0c, 0x0d, 0x60, 0x00, 0xfe, 0xfc, 0x5c, 0xd0};
+
+// ffmpeg -s 16x16 -f rawvideo -pix_fmt rgb24 -r 30 -i /dev/zero -c:v libvpx-vp9
+// -qmin 24 -qmax 24 -crf 24 -frames:v 1 -y out.ivf
+const uint8_t kCodedFrameVp9Qp96[] = {
+ 0xa2, 0x49, 0x83, 0x42, 0xe0, 0x00, 0xf0, 0x00, 0xf6, 0x00,
+ 0x38, 0x24, 0x1c, 0x18, 0xc0, 0x00, 0x00, 0x30, 0x70, 0x00,
+ 0x00, 0x4a, 0xa7, 0xff, 0xfc, 0xb9, 0x01, 0xbf, 0xff, 0xff,
+ 0x97, 0x20, 0xdb, 0xff, 0xff, 0xcb, 0x90, 0x5d, 0x40};
+
+// ffmpeg -s 16x16 -f rawvideo -pix_fmt yuv420p -r 30 -i /dev/zero -c:v libx264
+// -qmin 38 -qmax 38 -crf 38 -profile:v baseline -frames:v 2 -y out.264
+const uint8_t kCodedFrameH264SpsPpsIdrQp38[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x0a, 0xd9, 0x1e, 0x84,
+ 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c,
+ 0x48, 0x99, 0x20, 0x00, 0x00, 0x00, 0x01, 0x68, 0xcb, 0x80, 0xc4,
+ 0xb2, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0xf1, 0x18, 0xa0, 0x00,
+ 0x20, 0x5b, 0x1c, 0x00, 0x04, 0x07, 0xe3, 0x80, 0x00, 0x80, 0xfe};
+
+const uint8_t kCodedFrameH264SpsPpsIdrQp49[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x0a, 0xd9, 0x1e, 0x84,
+ 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c,
+ 0x48, 0x99, 0x20, 0x00, 0x00, 0x00, 0x01, 0x68, 0xcb, 0x80, 0x5d,
+ 0x2c, 0x80, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0xf1, 0x18, 0xa0,
+ 0x00, 0x5e, 0x38, 0x00, 0x08, 0x03, 0xc7, 0x00, 0x01, 0x00, 0x7c};
+
+const uint8_t kCodedFrameH264InterSliceQpDelta0[] = {0x00, 0x00, 0x00, 0x01,
+ 0x41, 0x9a, 0x39, 0xea};
+
+} // namespace
+
+TEST(QpParserTest, ParseQpVp8) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ kVideoCodecVP8, 0, kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25));
+ EXPECT_EQ(qp, 25u);
+}
+
+TEST(QpParserTest, ParseQpVp9) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ kVideoCodecVP9, 0, kCodedFrameVp9Qp96, sizeof(kCodedFrameVp9Qp96));
+ EXPECT_EQ(qp, 96u);
+}
+
+TEST(QpParserTest, ParseQpH264) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ VideoCodecType::kVideoCodecH264, 0, kCodedFrameH264SpsPpsIdrQp38,
+ sizeof(kCodedFrameH264SpsPpsIdrQp38));
+ EXPECT_EQ(qp, 38u);
+
+ qp = parser.Parse(kVideoCodecH264, 1, kCodedFrameH264SpsPpsIdrQp49,
+ sizeof(kCodedFrameH264SpsPpsIdrQp49));
+ EXPECT_EQ(qp, 49u);
+
+ qp = parser.Parse(kVideoCodecH264, 0, kCodedFrameH264InterSliceQpDelta0,
+ sizeof(kCodedFrameH264InterSliceQpDelta0));
+ EXPECT_EQ(qp, 38u);
+
+ qp = parser.Parse(kVideoCodecH264, 1, kCodedFrameH264InterSliceQpDelta0,
+ sizeof(kCodedFrameH264InterSliceQpDelta0));
+ EXPECT_EQ(qp, 49u);
+}
+
+TEST(QpParserTest, ParseQpUnsupportedCodecType) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(
+ kVideoCodecGeneric, 0, kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25));
+ EXPECT_FALSE(qp.has_value());
+}
+
+TEST(QpParserTest, ParseQpNullData) {
+ QpParser parser;
+ absl::optional<uint32_t> qp = parser.Parse(kVideoCodecVP8, 0, nullptr, 100);
+ EXPECT_FALSE(qp.has_value());
+}
+
+TEST(QpParserTest, ParseQpEmptyData) {
+ QpParser parser;
+ absl::optional<uint32_t> qp =
+ parser.Parse(kVideoCodecVP8, 0, kCodedFrameVp8Qp25, 0);
+ EXPECT_FALSE(qp.has_value());
+}
+
+TEST(QpParserTest, ParseQpSpatialIdxExceedsMax) {
+ QpParser parser;
+ absl::optional<uint32_t> qp =
+ parser.Parse(kVideoCodecVP8, kMaxSimulcastStreams, kCodedFrameVp8Qp25,
+ sizeof(kCodedFrameVp8Qp25));
+ EXPECT_FALSE(qp.has_value());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc
new file mode 100644
index 0000000000..9fb41a0ad7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/quality_scaler.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/units/time_delta.h"
+#include "api/video/video_adaptation_reason.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/quality_scaler_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/weak_ptr.h"
+
+namespace webrtc {
+
+namespace {
+// Threshold constant used until first downscale (to permit fast rampup).
+static const int kMeasureMs = 2000;
+static const float kSamplePeriodScaleFactor = 2.5;
+static const int kFramedropPercentThreshold = 60;
+static const size_t kMinFramesNeededToScale = 2 * 30;
+
+} // namespace
+
+class QualityScaler::QpSmoother {
+ public:
+ explicit QpSmoother(float alpha)
+ : alpha_(alpha),
+ // The initial value of last_sample_ms doesn't matter since the smoother
+ // will ignore the time delta for the first update.
+ last_sample_ms_(0),
+ smoother_(alpha) {}
+
+ absl::optional<int> GetAvg() const {
+ float value = smoother_.filtered();
+ if (value == rtc::ExpFilter::kValueUndefined) {
+ return absl::nullopt;
+ }
+ return static_cast<int>(value);
+ }
+
+ void Add(float sample, int64_t time_sent_us) {
+ int64_t now_ms = time_sent_us / 1000;
+ smoother_.Apply(static_cast<float>(now_ms - last_sample_ms_), sample);
+ last_sample_ms_ = now_ms;
+ }
+
+ void Reset() { smoother_.Reset(alpha_); }
+
+ private:
+ const float alpha_;
+ int64_t last_sample_ms_;
+ rtc::ExpFilter smoother_;
+};
+
+// The QualityScaler checks for QP periodically by queuing CheckQpTasks. The
+// task will either run to completion and trigger a new task being queued, or it
+// will be destroyed because the QualityScaler is destroyed.
+//
+// When high or low QP is reported, the task will be pending until a callback is
+// invoked. This lets the QualityScalerQpUsageHandlerInterface react to QP usage
+// asynchronously and prevents checking for QP until the stream has potentially
+// been reconfigured.
+class QualityScaler::CheckQpTask {
+ public:
+ // The result of one CheckQpTask may influence the delay of the next
+ // CheckQpTask.
+ struct Result {
+ bool observed_enough_frames = false;
+ bool qp_usage_reported = false;
+ };
+
+ CheckQpTask(QualityScaler* quality_scaler, Result previous_task_result)
+ : quality_scaler_(quality_scaler),
+ state_(State::kNotStarted),
+ previous_task_result_(previous_task_result),
+ weak_ptr_factory_(this) {}
+
+ void StartDelayedTask() {
+ RTC_DCHECK_EQ(state_, State::kNotStarted);
+ state_ = State::kCheckingQp;
+ TaskQueueBase::Current()->PostDelayedTask(
+ [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), this] {
+ if (!this_weak_ptr) {
+ // The task has been cancelled through destruction.
+ return;
+ }
+ RTC_DCHECK_EQ(state_, State::kCheckingQp);
+ RTC_DCHECK_RUN_ON(&quality_scaler_->task_checker_);
+ switch (quality_scaler_->CheckQp()) {
+ case QualityScaler::CheckQpResult::kInsufficientSamples: {
+ result_.observed_enough_frames = false;
+                // Not enough samples; no QP usage is reported this round.
+ break;
+ }
+ case QualityScaler::CheckQpResult::kNormalQp: {
+ result_.observed_enough_frames = true;
+ break;
+ }
+ case QualityScaler::CheckQpResult::kHighQp: {
+ result_.observed_enough_frames = true;
+ result_.qp_usage_reported = true;
+ quality_scaler_->fast_rampup_ = false;
+ quality_scaler_->handler_->OnReportQpUsageHigh();
+ quality_scaler_->ClearSamples();
+ break;
+ }
+ case QualityScaler::CheckQpResult::kLowQp: {
+ result_.observed_enough_frames = true;
+ result_.qp_usage_reported = true;
+ quality_scaler_->handler_->OnReportQpUsageLow();
+ quality_scaler_->ClearSamples();
+ break;
+ }
+ }
+ state_ = State::kCompleted;
+ // Starting the next task deletes the pending task. After this line,
+ // `this` has been deleted.
+ quality_scaler_->StartNextCheckQpTask();
+ },
+ TimeDelta::Millis(GetCheckingQpDelayMs()));
+ }
+
+ bool HasCompletedTask() const { return state_ == State::kCompleted; }
+
+ Result result() const {
+ RTC_DCHECK(HasCompletedTask());
+ return result_;
+ }
+
+ private:
+ enum class State {
+ kNotStarted,
+ kCheckingQp,
+ kCompleted,
+ };
+
+ // Determines the sampling period of CheckQpTasks.
+ int64_t GetCheckingQpDelayMs() const {
+ RTC_DCHECK_RUN_ON(&quality_scaler_->task_checker_);
+ if (quality_scaler_->fast_rampup_) {
+ return quality_scaler_->sampling_period_ms_;
+ }
+ if (quality_scaler_->experiment_enabled_ &&
+ !previous_task_result_.observed_enough_frames) {
+ // Use half the interval while waiting for enough frames.
+ return quality_scaler_->sampling_period_ms_ / 2;
+ }
+ if (quality_scaler_->scale_factor_ &&
+ !previous_task_result_.qp_usage_reported) {
+ // Last CheckQp did not call AdaptDown/Up, possibly reduce interval.
+ return quality_scaler_->sampling_period_ms_ *
+ quality_scaler_->scale_factor_.value();
+ }
+ return quality_scaler_->sampling_period_ms_ *
+ quality_scaler_->initial_scale_factor_;
+ }
+
+ QualityScaler* const quality_scaler_;
+ State state_;
+ const Result previous_task_result_;
+ Result result_;
+
+ rtc::WeakPtrFactory<CheckQpTask> weak_ptr_factory_;
+};
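+
+// Call-chain sketch of the polling loop above (no additional behavior): the
+// QualityScaler constructor calls StartNextCheckQpTask(), which creates a
+// CheckQpTask and posts a delayed lambda. When the delay fires, CheckQp()
+// runs, the handler may be notified, and StartNextCheckQpTask() is invoked
+// again, replacing (and thereby deleting) the completed task.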
+
+QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds)
+ : QualityScaler(handler, thresholds, kMeasureMs) {}
+
+// Protected ctor, should not be called directly.
+QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds,
+ int64_t default_sampling_period_ms)
+ : handler_(handler),
+ thresholds_(thresholds),
+ sampling_period_ms_(QualityScalerSettings::ParseFromFieldTrials()
+ .SamplingPeriodMs()
+ .value_or(default_sampling_period_ms)),
+ fast_rampup_(true),
+ // Arbitrarily choose size based on 30 fps for 5 seconds.
+ average_qp_(QualityScalerSettings::ParseFromFieldTrials()
+ .AverageQpWindow()
+ .value_or(5 * 30)),
+ framedrop_percent_media_opt_(5 * 30),
+ framedrop_percent_all_(5 * 30),
+ experiment_enabled_(QualityScalingExperiment::Enabled()),
+ min_frames_needed_(
+ QualityScalerSettings::ParseFromFieldTrials().MinFrames().value_or(
+ kMinFramesNeededToScale)),
+ initial_scale_factor_(QualityScalerSettings::ParseFromFieldTrials()
+ .InitialScaleFactor()
+ .value_or(kSamplePeriodScaleFactor)),
+ scale_factor_(
+ QualityScalerSettings::ParseFromFieldTrials().ScaleFactor()) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ if (experiment_enabled_) {
+ config_ = QualityScalingExperiment::GetConfig();
+ qp_smoother_high_.reset(new QpSmoother(config_.alpha_high));
+ qp_smoother_low_.reset(new QpSmoother(config_.alpha_low));
+ }
+ RTC_DCHECK(handler_ != nullptr);
+ StartNextCheckQpTask();
+ RTC_LOG(LS_INFO) << "QP thresholds: low: " << thresholds_.low
+ << ", high: " << thresholds_.high;
+}
+
+QualityScaler::~QualityScaler() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+}
+
+void QualityScaler::StartNextCheckQpTask() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK(!pending_qp_task_ || pending_qp_task_->HasCompletedTask())
+ << "A previous CheckQpTask has not completed yet!";
+ CheckQpTask::Result previous_task_result;
+ if (pending_qp_task_) {
+ previous_task_result = pending_qp_task_->result();
+ }
+ pending_qp_task_ = std::make_unique<CheckQpTask>(this, previous_task_result);
+ pending_qp_task_->StartDelayedTask();
+}
+
+void QualityScaler::SetQpThresholds(VideoEncoder::QpThresholds thresholds) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ thresholds_ = thresholds;
+}
+
+void QualityScaler::ReportDroppedFrameByMediaOpt() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_media_opt_.AddSample(100);
+ framedrop_percent_all_.AddSample(100);
+}
+
+void QualityScaler::ReportDroppedFrameByEncoder() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_all_.AddSample(100);
+}
+
+void QualityScaler::ReportQp(int qp, int64_t time_sent_us) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_media_opt_.AddSample(0);
+ framedrop_percent_all_.AddSample(0);
+ average_qp_.AddSample(qp);
+ if (qp_smoother_high_)
+ qp_smoother_high_->Add(qp, time_sent_us);
+ if (qp_smoother_low_)
+ qp_smoother_low_->Add(qp, time_sent_us);
+}
+
+bool QualityScaler::QpFastFilterLow() const {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ size_t num_frames = config_.use_all_drop_reasons
+ ? framedrop_percent_all_.Size()
+ : framedrop_percent_media_opt_.Size();
+ const size_t kMinNumFrames = 10;
+ if (num_frames < kMinNumFrames) {
+ return false; // Wait for more frames before making a decision.
+ }
+ absl::optional<int> avg_qp_high = qp_smoother_high_
+ ? qp_smoother_high_->GetAvg()
+ : average_qp_.GetAverageRoundedDown();
+ return (avg_qp_high) ? (avg_qp_high.value() <= thresholds_.low) : false;
+}
+
+QualityScaler::CheckQpResult QualityScaler::CheckQp() const {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+  // Thresholds are set through InitEncode(), so they should be set by now.
+ RTC_DCHECK_GE(thresholds_.low, 0);
+
+ // If we have not observed at least this many frames we can't make a good
+ // scaling decision.
+ const size_t frames = config_.use_all_drop_reasons
+ ? framedrop_percent_all_.Size()
+ : framedrop_percent_media_opt_.Size();
+ if (frames < min_frames_needed_) {
+ return CheckQpResult::kInsufficientSamples;
+ }
+
+ // Check if we should scale down due to high frame drop.
+ const absl::optional<int> drop_rate =
+ config_.use_all_drop_reasons
+ ? framedrop_percent_all_.GetAverageRoundedDown()
+ : framedrop_percent_media_opt_.GetAverageRoundedDown();
+ if (drop_rate && *drop_rate >= kFramedropPercentThreshold) {
+ RTC_LOG(LS_INFO) << "Reporting high QP, framedrop percent " << *drop_rate;
+ return CheckQpResult::kHighQp;
+ }
+
+ // Check if we should scale up or down based on QP.
+ const absl::optional<int> avg_qp_high =
+ qp_smoother_high_ ? qp_smoother_high_->GetAvg()
+ : average_qp_.GetAverageRoundedDown();
+ const absl::optional<int> avg_qp_low =
+ qp_smoother_low_ ? qp_smoother_low_->GetAvg()
+ : average_qp_.GetAverageRoundedDown();
+ if (avg_qp_high && avg_qp_low) {
+ RTC_LOG(LS_INFO) << "Checking average QP " << *avg_qp_high << " ("
+ << *avg_qp_low << ").";
+ if (*avg_qp_high > thresholds_.high) {
+ return CheckQpResult::kHighQp;
+ }
+ if (*avg_qp_low <= thresholds_.low) {
+ // QP has been low. We want to try a higher resolution.
+ return CheckQpResult::kLowQp;
+ }
+ }
+ return CheckQpResult::kNormalQp;
+}
+
+void QualityScaler::ClearSamples() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ framedrop_percent_media_opt_.Reset();
+ framedrop_percent_all_.Reset();
+ average_qp_.Reset();
+ if (qp_smoother_high_)
+ qp_smoother_high_->Reset();
+ if (qp_smoother_low_)
+ qp_smoother_low_->Reset();
+}
+
+QualityScalerQpUsageHandlerInterface::~QualityScalerQpUsageHandlerInterface() {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h
new file mode 100644
index 0000000000..93014e36a7
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
+#define MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/experiments/quality_scaling_experiment.h"
+#include "rtc_base/numerics/moving_average.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class QualityScalerQpUsageHandlerCallbackInterface;
+class QualityScalerQpUsageHandlerInterface;
+
+// QualityScaler runs asynchronously and monitors QP values of encoded frames.
+// It holds a reference to a QualityScalerQpUsageHandlerInterface implementation
+// to signal an overuse or underuse of QP (which indicate a desire to scale the
+// video stream down or up).
+class QualityScaler {
+ public:
+ // Construct a QualityScaler with given `thresholds` and `handler`.
+ // This starts the quality scaler periodically checking what the average QP
+ // has been recently.
+ QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds);
+ virtual ~QualityScaler();
+ // Should be called each time a frame is dropped at encoding.
+ void ReportDroppedFrameByMediaOpt();
+ void ReportDroppedFrameByEncoder();
+ // Inform the QualityScaler of the last seen QP.
+ void ReportQp(int qp, int64_t time_sent_us);
+
+ void SetQpThresholds(VideoEncoder::QpThresholds thresholds);
+ bool QpFastFilterLow() const;
+
+  // The following members are declared protected for testing purposes.
+ protected:
+ QualityScaler(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds,
+ int64_t sampling_period_ms);
+
+ private:
+ class QpSmoother;
+ class CheckQpTask;
+ class CheckQpTaskHandlerCallback;
+
+ enum class CheckQpResult {
+ kInsufficientSamples,
+ kNormalQp,
+ kHighQp,
+ kLowQp,
+ };
+
+ // Starts checking for QP in a delayed task. When the resulting CheckQpTask
+ // completes, it will invoke this method again, ensuring that we always
+ // periodically check for QP. See CheckQpTask for more details. We never run
+ // more than one CheckQpTask at a time.
+ void StartNextCheckQpTask();
+
+ CheckQpResult CheckQp() const;
+ void ClearSamples();
+
+ std::unique_ptr<CheckQpTask> pending_qp_task_ RTC_GUARDED_BY(&task_checker_);
+ QualityScalerQpUsageHandlerInterface* const handler_
+ RTC_GUARDED_BY(&task_checker_);
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_;
+
+ VideoEncoder::QpThresholds thresholds_ RTC_GUARDED_BY(&task_checker_);
+ const int64_t sampling_period_ms_;
+ bool fast_rampup_ RTC_GUARDED_BY(&task_checker_);
+ rtc::MovingAverage average_qp_ RTC_GUARDED_BY(&task_checker_);
+ rtc::MovingAverage framedrop_percent_media_opt_
+ RTC_GUARDED_BY(&task_checker_);
+ rtc::MovingAverage framedrop_percent_all_ RTC_GUARDED_BY(&task_checker_);
+
+ // Used by QualityScalingExperiment.
+ const bool experiment_enabled_;
+ QualityScalingExperiment::Config config_ RTC_GUARDED_BY(&task_checker_);
+ std::unique_ptr<QpSmoother> qp_smoother_high_ RTC_GUARDED_BY(&task_checker_);
+ std::unique_ptr<QpSmoother> qp_smoother_low_ RTC_GUARDED_BY(&task_checker_);
+
+ const size_t min_frames_needed_;
+ const double initial_scale_factor_;
+ const absl::optional<double> scale_factor_;
+};
+
+// Reacts to QP being too high or too low. For best quality, when QP is high it
+// is desired to decrease the resolution or frame rate of the stream and when QP
+// is low it is desired to increase the resolution or frame rate of the stream.
+// Whether to reconfigure the stream is ultimately up to the handler, which is
+// able to respond asynchronously.
+class QualityScalerQpUsageHandlerInterface {
+ public:
+ virtual ~QualityScalerQpUsageHandlerInterface();
+
+ virtual void OnReportQpUsageHigh() = 0;
+ virtual void OnReportQpUsageLow() = 0;
+};
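+
+// Usage sketch (illustrative only; `LoggingHandler` is a hypothetical
+// implementation, and all calls must be made on the task queue the scaler is
+// created on):
+//
+//   class LoggingHandler : public QualityScalerQpUsageHandlerInterface {
+//    public:
+//     void OnReportQpUsageHigh() override { RTC_LOG(LS_INFO) << "high QP"; }
+//     void OnReportQpUsageLow() override { RTC_LOG(LS_INFO) << "low QP"; }
+//   };
+//
+//   LoggingHandler handler;
+//   QualityScaler scaler(&handler,
+//                        VideoEncoder::QpThresholds(/*low=*/24, /*high=*/37));
+//   scaler.ReportQp(/*qp=*/30, /*time_sent_us=*/0);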
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc
new file mode 100644
index 0000000000..c17159fb64
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/quality_scaler.h"
+
+#include <memory>
+#include <string>
+
+#include "api/units/time_delta.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+static const int kFramerate = 30;
+static const int kLowQp = 15;
+static const int kHighQp = 40;
+static const int kMinFramesNeededToScale = 60; // From quality_scaler.cc.
+static constexpr TimeDelta kDefaultTimeout = TimeDelta::Millis(150);
+} // namespace
+
+class FakeQpUsageHandler : public QualityScalerQpUsageHandlerInterface {
+ public:
+ ~FakeQpUsageHandler() override = default;
+
+ // QualityScalerQpUsageHandlerInterface implementation.
+ void OnReportQpUsageHigh() override {
+ adapt_down_events_++;
+ event.Set();
+ }
+
+ void OnReportQpUsageLow() override {
+ adapt_up_events_++;
+ event.Set();
+ }
+
+ rtc::Event event;
+ int adapt_up_events_ = 0;
+ int adapt_down_events_ = 0;
+};
+
+// Pass a lower sampling period to speed up the tests.
+class QualityScalerUnderTest : public QualityScaler {
+ public:
+ explicit QualityScalerUnderTest(QualityScalerQpUsageHandlerInterface* handler,
+ VideoEncoder::QpThresholds thresholds)
+ : QualityScaler(handler, thresholds, 5) {}
+};
+
+class QualityScalerTest : public ::testing::Test,
+ public ::testing::WithParamInterface<std::string> {
+ protected:
+ enum ScaleDirection {
+ kKeepScaleAboveLowQp,
+ kKeepScaleAtHighQp,
+ kScaleDown,
+ kScaleDownAboveHighQp,
+ kScaleUp
+ };
+
+ QualityScalerTest()
+ : scoped_field_trial_(GetParam()),
+ task_queue_("QualityScalerTestQueue"),
+ handler_(std::make_unique<FakeQpUsageHandler>()) {
+ task_queue_.SendTask(
+ [this] {
+ qs_ = std::unique_ptr<QualityScaler>(new QualityScalerUnderTest(
+ handler_.get(), VideoEncoder::QpThresholds(kLowQp, kHighQp)));
+ });
+ }
+
+ ~QualityScalerTest() override {
+ task_queue_.SendTask([this] { qs_ = nullptr; });
+ }
+
+ void TriggerScale(ScaleDirection scale_direction) {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ switch (scale_direction) {
+ case kKeepScaleAboveLowQp:
+ qs_->ReportQp(kLowQp + 1, 0);
+ break;
+ case kScaleUp:
+ qs_->ReportQp(kLowQp, 0);
+ break;
+ case kScaleDown:
+ qs_->ReportDroppedFrameByMediaOpt();
+ break;
+ case kKeepScaleAtHighQp:
+ qs_->ReportQp(kHighQp, 0);
+ break;
+ case kScaleDownAboveHighQp:
+ qs_->ReportQp(kHighQp + 1, 0);
+ break;
+ }
+ }
+ }
+
+ test::ScopedFieldTrials scoped_field_trial_;
+ TaskQueueForTest task_queue_;
+ std::unique_ptr<QualityScaler> qs_;
+ std::unique_ptr<FakeQpUsageHandler> handler_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ FieldTrials,
+ QualityScalerTest,
+ ::testing::Values(
+ "WebRTC-Video-QualityScaling/Enabled-1,2,3,4,5,6,7,8,0.9,0.99,1/",
+ "WebRTC-Video-QualityScaling/Disabled/"));
+
+TEST_P(QualityScalerTest, DownscalesAfterContinuousFramedrop) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleDown); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, KeepsScaleAtHighQp) {
+ task_queue_.SendTask([this] { TriggerScale(kKeepScaleAtHighQp); });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DownscalesAboveHighQp) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleDownAboveHighQp); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportQp(kHighQp, 0);
+ }
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportQp(kHighQp, 0);
+ }
+ });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DownscalesAfterTwoThirdsIfFieldTrialEnabled) {
+ const bool kDownScaleExpected =
+ GetParam().find("Enabled") != std::string::npos;
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kFramerate * 5; ++i) {
+ qs_->ReportDroppedFrameByMediaOpt();
+ qs_->ReportDroppedFrameByEncoder();
+ qs_->ReportQp(kHighQp, 0);
+ }
+ });
+ EXPECT_EQ(kDownScaleExpected, handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(kDownScaleExpected ? 1 : 0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, KeepsScaleOnNormalQp) {
+ task_queue_.SendTask([this] { TriggerScale(kKeepScaleAboveLowQp); });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, UpscalesAfterLowQp) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleUp); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, ScalesDownAndBackUp) {
+ task_queue_.SendTask([this] { TriggerScale(kScaleDown); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+ task_queue_.SendTask([this] { TriggerScale(kScaleUp); });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, DoesNotScaleUntilEnoughFramesObserved) {
+ task_queue_.SendTask([this] {
+ // Not enough frames to make a decision.
+ for (int i = 0; i < kMinFramesNeededToScale - 1; ++i) {
+ qs_->ReportQp(kLowQp, 0);
+ }
+ });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeout));
+ task_queue_.SendTask([this] {
+ // Send 1 more. Enough frames observed, should result in an adapt
+ // request.
+ qs_->ReportQp(kLowQp, 0);
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+
+ // Samples should be cleared after an adapt request.
+ task_queue_.SendTask([this] {
+ // Not enough frames to make a decision.
+ qs_->ReportQp(kLowQp, 0);
+ });
+ EXPECT_FALSE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(0, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+TEST_P(QualityScalerTest, ScalesDownAndBackUpWithMinFramesNeeded) {
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kMinFramesNeededToScale; ++i) {
+ qs_->ReportQp(kHighQp + 1, 0);
+ }
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(0, handler_->adapt_up_events_);
+ // Samples cleared.
+ task_queue_.SendTask([this] {
+ for (int i = 0; i < kMinFramesNeededToScale; ++i) {
+ qs_->ReportQp(kLowQp, 0);
+ }
+ });
+ EXPECT_TRUE(handler_->event.Wait(kDefaultTimeout));
+ EXPECT_EQ(1, handler_->adapt_down_events_);
+ EXPECT_EQ(1, handler_->adapt_up_events_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc
new file mode 100644
index 0000000000..1496934e1c
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <numeric>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace {
+// Ratio allocation between temporal streams:
+// Values as required for the VP8 codec (accumulating).
+static const float
+ kLayerRateAllocation[kMaxTemporalStreams][kMaxTemporalStreams] = {
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+};
+
+static const float kBaseHeavy3TlRateAllocation[kMaxTemporalStreams] = {
+ 0.6f, 0.8f, 1.0f, 1.0f // 3 layers {60%, 20%, 20%}
+};
+
+const uint32_t kLegacyScreenshareTl0BitrateKbps = 200;
+const uint32_t kLegacyScreenshareTl1BitrateKbps = 1000;
+} // namespace
+
+float SimulcastRateAllocator::GetTemporalRateAllocation(
+ int num_layers,
+ int temporal_id,
+ bool base_heavy_tl3_alloc) {
+ RTC_CHECK_GT(num_layers, 0);
+ RTC_CHECK_LE(num_layers, kMaxTemporalStreams);
+ RTC_CHECK_GE(temporal_id, 0);
+ RTC_CHECK_LT(temporal_id, num_layers);
+ if (num_layers == 3 && base_heavy_tl3_alloc) {
+ return kBaseHeavy3TlRateAllocation[temporal_id];
+ }
+ return kLayerRateAllocation[num_layers - 1][temporal_id];
+}
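+
+// Worked example using the table above: with three temporal layers and no
+// base-heavy override, the cumulative fractions are {0.4, 0.6, 1.0}, so a
+// 1000 kbps stream yields cumulative targets of 400/600/1000 kbps, which
+// DefaultTemporalLayerAllocation() later converts into per-layer rates of
+// 400 + 200 + 400 kbps for TL0/TL1/TL2.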
+
+SimulcastRateAllocator::SimulcastRateAllocator(const VideoCodec& codec)
+ : codec_(codec),
+ stable_rate_settings_(StableTargetRateExperiment::ParseFromFieldTrials()),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
+ legacy_conference_mode_(false) {}
+
+SimulcastRateAllocator::~SimulcastRateAllocator() = default;
+
+VideoBitrateAllocation SimulcastRateAllocator::Allocate(
+ VideoBitrateAllocationParameters parameters) {
+ VideoBitrateAllocation allocated_bitrates;
+ DataRate stable_rate = parameters.total_bitrate;
+ if (stable_rate_settings_.IsEnabled() &&
+ parameters.stable_bitrate > DataRate::Zero()) {
+ stable_rate = std::min(parameters.stable_bitrate, parameters.total_bitrate);
+ }
+ DistributeAllocationToSimulcastLayers(parameters.total_bitrate, stable_rate,
+ &allocated_bitrates);
+ DistributeAllocationToTemporalLayers(&allocated_bitrates);
+ return allocated_bitrates;
+}
+
+void SimulcastRateAllocator::DistributeAllocationToSimulcastLayers(
+ DataRate total_bitrate,
+ DataRate stable_bitrate,
+ VideoBitrateAllocation* allocated_bitrates) {
+ DataRate left_in_total_allocation = total_bitrate;
+ DataRate left_in_stable_allocation = stable_bitrate;
+
+ if (codec_.maxBitrate) {
+ DataRate max_rate = DataRate::KilobitsPerSec(codec_.maxBitrate);
+ left_in_total_allocation = std::min(left_in_total_allocation, max_rate);
+ left_in_stable_allocation = std::min(left_in_stable_allocation, max_rate);
+ }
+
+ if (codec_.numberOfSimulcastStreams == 0) {
+ // No simulcast, just set the target as this has been capped already.
+ if (codec_.active) {
+ allocated_bitrates->SetBitrate(
+ 0, 0,
+ std::max(DataRate::KilobitsPerSec(codec_.minBitrate),
+ left_in_total_allocation)
+ .bps());
+ }
+ return;
+ }
+
+  // Sort the layers by maxBitrate; they might not always be ordered from
+  // smallest to biggest.
+ std::vector<size_t> layer_index(codec_.numberOfSimulcastStreams);
+ std::iota(layer_index.begin(), layer_index.end(), 0);
+ std::stable_sort(layer_index.begin(), layer_index.end(),
+ [this](size_t a, size_t b) {
+ return std::tie(codec_.simulcastStream[a].maxBitrate) <
+ std::tie(codec_.simulcastStream[b].maxBitrate);
+ });
+
+ // Find the first active layer. We don't allocate to inactive layers.
+ size_t active_layer = 0;
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ if (codec_.simulcastStream[layer_index[active_layer]].active) {
+ // Found the first active layer.
+ break;
+ }
+ }
+  // All streams may be inactive, in which case there is nothing more to do.
+ if (active_layer == codec_.numberOfSimulcastStreams) {
+ return;
+ }
+
+ // Always allocate enough bitrate for the minimum bitrate of the first
+ // active layer. Suspending below min bitrate is controlled outside the
+ // codec implementation and is not overridden by this.
+ DataRate min_rate = DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index[active_layer]].minBitrate);
+ left_in_total_allocation = std::max(left_in_total_allocation, min_rate);
+ left_in_stable_allocation = std::max(left_in_stable_allocation, min_rate);
+
+ // Begin by allocating bitrate to simulcast streams, putting all bitrate in
+ // temporal layer 0. We'll then distribute this bitrate, across potential
+ // temporal layers, when stream allocation is done.
+
+ bool first_allocation = false;
+ if (stream_enabled_.empty()) {
+ // First time allocating, this means we should not include hysteresis in
+ // case this is a reconfiguration of an existing enabled stream.
+ first_allocation = true;
+ stream_enabled_.resize(codec_.numberOfSimulcastStreams, false);
+ }
+
+ size_t top_active_layer = active_layer;
+ // Allocate up to the target bitrate for each active simulcast layer.
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ const SimulcastStream& stream =
+ codec_.simulcastStream[layer_index[active_layer]];
+ if (!stream.active) {
+ stream_enabled_[layer_index[active_layer]] = false;
+ continue;
+ }
+ // If we can't allocate to the current layer we can't allocate to higher
+ // layers because they require a higher minimum bitrate.
+ DataRate min_bitrate = DataRate::KilobitsPerSec(stream.minBitrate);
+ DataRate target_bitrate = DataRate::KilobitsPerSec(stream.targetBitrate);
+ double hysteresis_factor =
+ codec_.mode == VideoCodecMode::kRealtimeVideo
+ ? stable_rate_settings_.GetVideoHysteresisFactor()
+ : stable_rate_settings_.GetScreenshareHysteresisFactor();
+ if (!first_allocation && !stream_enabled_[layer_index[active_layer]]) {
+ min_bitrate = std::min(hysteresis_factor * min_bitrate, target_bitrate);
+ }
+ if (left_in_stable_allocation < min_bitrate) {
+ allocated_bitrates->set_bw_limited(true);
+ break;
+ }
+
+ // We are allocating to this layer so it is the current active allocation.
+ top_active_layer = layer_index[active_layer];
+ stream_enabled_[layer_index[active_layer]] = true;
+ DataRate layer_rate = std::min(left_in_total_allocation, target_bitrate);
+ allocated_bitrates->SetBitrate(layer_index[active_layer], 0,
+ layer_rate.bps());
+ left_in_total_allocation -= layer_rate;
+ left_in_stable_allocation -=
+ std::min(left_in_stable_allocation, target_bitrate);
+ }
+
+ // All layers above this one are not active.
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ stream_enabled_[layer_index[active_layer]] = false;
+ }
+
+  // Next, try to allocate the remaining bitrate, up to the max bitrate, to
+  // the top active stream.
+ // TODO(sprang): Allocate up to max bitrate for all layers once we have a
+ // better idea of possible performance implications.
+ if (left_in_total_allocation > DataRate::Zero()) {
+ const SimulcastStream& stream = codec_.simulcastStream[top_active_layer];
+ DataRate initial_layer_rate = DataRate::BitsPerSec(
+ allocated_bitrates->GetSpatialLayerSum(top_active_layer));
+ DataRate additional_allocation = std::min(
+ left_in_total_allocation,
+ DataRate::KilobitsPerSec(stream.maxBitrate) - initial_layer_rate);
+ allocated_bitrates->SetBitrate(
+ top_active_layer, 0,
+ (initial_layer_rate + additional_allocation).bps());
+ }
+}
+
+void SimulcastRateAllocator::DistributeAllocationToTemporalLayers(
+ VideoBitrateAllocation* allocated_bitrates_bps) const {
+ const int num_spatial_streams =
+ std::max(1, static_cast<int>(codec_.numberOfSimulcastStreams));
+
+ // Finally, distribute the bitrate for the simulcast streams across the
+ // available temporal layers.
+ for (int simulcast_id = 0; simulcast_id < num_spatial_streams;
+ ++simulcast_id) {
+ uint32_t target_bitrate_kbps =
+ allocated_bitrates_bps->GetBitrate(simulcast_id, 0) / 1000;
+ if (target_bitrate_kbps == 0) {
+ continue;
+ }
+
+ const uint32_t expected_allocated_bitrate_kbps = target_bitrate_kbps;
+ RTC_DCHECK_EQ(
+ target_bitrate_kbps,
+ allocated_bitrates_bps->GetSpatialLayerSum(simulcast_id) / 1000);
+ const int num_temporal_streams = NumTemporalStreams(simulcast_id);
+ uint32_t max_bitrate_kbps;
+    // Legacy screenshare with temporal layers only, or simulcast screenshare
+    // using legacy mode for simulcast stream 0.
+ if (codec_.mode == VideoCodecMode::kScreensharing &&
+ legacy_conference_mode_ && simulcast_id == 0) {
+ // TODO(holmer): This is a "temporary" hack for screensharing, where we
+ // interpret the startBitrate as the encoder target bitrate. This is
+ // to allow for a different max bitrate, so if the codec can't meet
+ // the target we still allow it to overshoot up to the max before dropping
+ // frames. This hack should be improved.
+ max_bitrate_kbps =
+ std::min(kLegacyScreenshareTl1BitrateKbps, target_bitrate_kbps);
+ target_bitrate_kbps =
+ std::min(kLegacyScreenshareTl0BitrateKbps, target_bitrate_kbps);
+ } else if (num_spatial_streams == 1) {
+ max_bitrate_kbps = codec_.maxBitrate;
+ } else {
+ max_bitrate_kbps = codec_.simulcastStream[simulcast_id].maxBitrate;
+ }
+
+ std::vector<uint32_t> tl_allocation;
+ if (num_temporal_streams == 1) {
+ tl_allocation.push_back(target_bitrate_kbps);
+ } else {
+ if (codec_.mode == VideoCodecMode::kScreensharing &&
+ legacy_conference_mode_ && simulcast_id == 0) {
+ tl_allocation = ScreenshareTemporalLayerAllocation(
+ target_bitrate_kbps, max_bitrate_kbps, simulcast_id);
+ } else {
+ tl_allocation = DefaultTemporalLayerAllocation(
+ target_bitrate_kbps, max_bitrate_kbps, simulcast_id);
+ }
+ }
+ RTC_DCHECK_GT(tl_allocation.size(), 0);
+ RTC_DCHECK_LE(tl_allocation.size(), num_temporal_streams);
+
+ uint64_t tl_allocation_sum_kbps = 0;
+ for (size_t tl_index = 0; tl_index < tl_allocation.size(); ++tl_index) {
+ uint32_t layer_rate_kbps = tl_allocation[tl_index];
+ if (layer_rate_kbps > 0) {
+ allocated_bitrates_bps->SetBitrate(simulcast_id, tl_index,
+ layer_rate_kbps * 1000);
+ }
+ tl_allocation_sum_kbps += layer_rate_kbps;
+ }
+ RTC_DCHECK_LE(tl_allocation_sum_kbps, expected_allocated_bitrate_kbps);
+ }
+}
+
+std::vector<uint32_t> SimulcastRateAllocator::DefaultTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const {
+ const size_t num_temporal_layers = NumTemporalStreams(simulcast_id);
+ std::vector<uint32_t> bitrates;
+ for (size_t i = 0; i < num_temporal_layers; ++i) {
+ float layer_bitrate =
+ bitrate_kbps *
+ GetTemporalRateAllocation(
+ num_temporal_layers, i,
+ rate_control_settings_.Vp8BaseHeavyTl3RateAllocation());
+ bitrates.push_back(static_cast<uint32_t>(layer_bitrate + 0.5));
+ }
+
+ // Allocation table is of aggregates, transform to individual rates.
+ uint32_t sum = 0;
+ for (size_t i = 0; i < num_temporal_layers; ++i) {
+ uint32_t layer_bitrate = bitrates[i];
+ RTC_DCHECK_LE(sum, bitrates[i]);
+ bitrates[i] -= sum;
+ sum = layer_bitrate;
+
+ if (sum >= static_cast<uint32_t>(bitrate_kbps)) {
+ // Sum adds up; any subsequent layers will be 0.
+ bitrates.resize(i + 1);
+ break;
+ }
+ }
+
+ return bitrates;
+}
+
+std::vector<uint32_t>
+SimulcastRateAllocator::ScreenshareTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const {
+ if (simulcast_id > 0) {
+ return DefaultTemporalLayerAllocation(bitrate_kbps, max_bitrate_kbps,
+ simulcast_id);
+ }
+ std::vector<uint32_t> allocation;
+ allocation.push_back(bitrate_kbps);
+ if (max_bitrate_kbps > bitrate_kbps)
+ allocation.push_back(max_bitrate_kbps - bitrate_kbps);
+ return allocation;
+}
+
+const VideoCodec& SimulcastRateAllocator::GetCodec() const {
+ return codec_;
+}
+
+int SimulcastRateAllocator::NumTemporalStreams(size_t simulcast_id) const {
+ return std::max<uint8_t>(
+ 1,
+ codec_.codecType == kVideoCodecVP8 && codec_.numberOfSimulcastStreams == 0
+ ? codec_.VP8().numberOfTemporalLayers
+ : codec_.simulcastStream[simulcast_id].numberOfTemporalLayers);
+}
+
+void SimulcastRateAllocator::SetLegacyConferenceMode(bool enabled) {
+ legacy_conference_mode_ = enabled;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h
new file mode 100644
index 0000000000..6f93dbde74
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
+#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/experiments/stable_target_rate_experiment.h"
+
+namespace webrtc {
+
+class SimulcastRateAllocator : public VideoBitrateAllocator {
+ public:
+ explicit SimulcastRateAllocator(const VideoCodec& codec);
+ ~SimulcastRateAllocator() override;
+
+ SimulcastRateAllocator(const SimulcastRateAllocator&) = delete;
+ SimulcastRateAllocator& operator=(const SimulcastRateAllocator&) = delete;
+
+ VideoBitrateAllocation Allocate(
+ VideoBitrateAllocationParameters parameters) override;
+ const VideoCodec& GetCodec() const;
+
+ static float GetTemporalRateAllocation(int num_layers,
+ int temporal_id,
+ bool base_heavy_tl3_alloc);
+
+ void SetLegacyConferenceMode(bool mode) override;
+
+ private:
+ void DistributeAllocationToSimulcastLayers(
+ DataRate total_bitrate,
+ DataRate stable_bitrate,
+ VideoBitrateAllocation* allocated_bitrates);
+ void DistributeAllocationToTemporalLayers(
+ VideoBitrateAllocation* allocated_bitrates) const;
+ std::vector<uint32_t> DefaultTemporalLayerAllocation(int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const;
+ std::vector<uint32_t> ScreenshareTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int simulcast_id) const;
+ int NumTemporalStreams(size_t simulcast_id) const;
+
+ const VideoCodec codec_;
+ const StableTargetRateExperiment stable_rate_settings_;
+ const RateControlSettings rate_control_settings_;
+ std::vector<bool> stream_enabled_;
+ bool legacy_conference_mode_;
+};
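+
+// Usage sketch (illustrative only; `codec` is assumed to be a fully populated
+// VideoCodec, e.g. with numberOfSimulcastStreams and simulcastStream[] set):
+//
+//   SimulcastRateAllocator allocator(codec);
+//   VideoBitrateAllocation allocation =
+//       allocator.Allocate(VideoBitrateAllocationParameters(
+//           DataRate::KilobitsPerSec(1500), /*framerate=*/30.0));
+//   uint32_t lowest_layer_bps = allocation.GetSpatialLayerSum(0);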
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_RATE_ALLOCATOR_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
new file mode 100644
index 0000000000..24d7c58bcd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/video_codecs/vp8_frame_buffer_controller.h"
+#include "api/video_codecs/vp8_frame_config.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "rtc_base/checks.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+using ::testing::_;
+
+constexpr uint32_t kFramerateFps = 5;
+constexpr uint32_t kMinBitrateKbps = 50;
+// These correspond to kLegacyScreenshareTl(0|1)BitrateKbps in
+// simulcast_rate_allocator.cc.
+constexpr uint32_t kLegacyScreenshareTargetBitrateKbps = 200;
+constexpr uint32_t kLegacyScreenshareMaxBitrateKbps = 1000;
+// Bitrates for upper simulcast screenshare layer.
+constexpr uint32_t kSimulcastScreenshareMinBitrateKbps = 600;
+constexpr uint32_t kSimulcastScreenshareMaxBitrateKbps = 1250;
+// Default video hysteresis factor: the allocatable bitrate for the next layer
+// must exceed that layer's min bitrate by 20% in order for the layer to be
+// initially turned on.
+const double kDefaultHysteresis = 1.2;
+
+class MockTemporalLayers : public Vp8FrameBufferController {
+ public:
+ MOCK_METHOD(Vp8FrameConfig, NextFrameConfig, (size_t, uint32_t), (override));
+ MOCK_METHOD(void,
+ OnRatesUpdated,
+ (size_t, const std::vector<uint32_t>&, int),
+ (override));
+ MOCK_METHOD(Vp8EncoderConfig, UpdateConfiguration, (size_t), (override));
+ MOCK_METHOD(void,
+ OnEncodeDone,
+ (size_t, uint32_t, size_t, bool, int, CodecSpecificInfo*),
+ (override));
+};
+} // namespace
+
+class SimulcastRateAllocatorTest : public ::testing::TestWithParam<bool> {
+ public:
+ SimulcastRateAllocatorTest() {
+ codec_.codecType = kVideoCodecVP8;
+ codec_.minBitrate = kMinBitrateKbps;
+ codec_.maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.active = true;
+ CreateAllocator();
+ }
+ virtual ~SimulcastRateAllocatorTest() {}
+
+ template <size_t S>
+ void ExpectEqual(uint32_t (&expected)[S],
+ const std::vector<uint32_t>& actual) {
+ EXPECT_EQ(S, actual.size());
+ for (size_t i = 0; i < S; ++i)
+ EXPECT_EQ(expected[i], actual[i]) << "Mismatch at index " << i;
+ }
+
+ template <size_t S>
+ void ExpectEqual(uint32_t (&expected)[S],
+ const VideoBitrateAllocation& actual) {
+ uint32_t sum = 0;
+ for (size_t i = 0; i < S; ++i) {
+ uint32_t layer_bitrate = actual.GetSpatialLayerSum(i);
+ if (layer_bitrate == 0) {
+ EXPECT_FALSE(actual.IsSpatialLayerUsed(i));
+ }
+ EXPECT_EQ(expected[i] * 1000U, layer_bitrate)
+ << "Mismatch at index " << i;
+ sum += layer_bitrate;
+ }
+ EXPECT_EQ(sum, actual.get_sum_bps());
+ }
+
+ void CreateAllocator(bool legacy_conference_mode = false) {
+ allocator_.reset(new SimulcastRateAllocator(codec_));
+ allocator_->SetLegacyConferenceMode(legacy_conference_mode);
+ }
+
+ void SetupCodec3SL3TL(const std::vector<bool>& active_streams) {
+ const size_t num_simulcast_layers = 3;
+ RTC_DCHECK_GE(active_streams.size(), num_simulcast_layers);
+ SetupCodec2SL3TL(active_streams);
+ codec_.numberOfSimulcastStreams = num_simulcast_layers;
+ codec_.simulcastStream[2].numberOfTemporalLayers = 3;
+ codec_.simulcastStream[2].maxBitrate = 4000;
+ codec_.simulcastStream[2].targetBitrate = 3000;
+ codec_.simulcastStream[2].minBitrate = 2000;
+ codec_.simulcastStream[2].active = active_streams[2];
+ }
+
+ void SetupCodec2SL3TL(const std::vector<bool>& active_streams) {
+ const size_t num_simulcast_layers = 2;
+ RTC_DCHECK_GE(active_streams.size(), num_simulcast_layers);
+ SetupCodec1SL3TL(active_streams);
+ codec_.numberOfSimulcastStreams = num_simulcast_layers;
+ codec_.simulcastStream[1].numberOfTemporalLayers = 3;
+ codec_.simulcastStream[1].maxBitrate = 1000;
+ codec_.simulcastStream[1].targetBitrate = 500;
+ codec_.simulcastStream[1].minBitrate = 50;
+ codec_.simulcastStream[1].active = active_streams[1];
+ }
+
+ void SetupCodec1SL3TL(const std::vector<bool>& active_streams) {
+    const size_t num_simulcast_layers = 1;
+ RTC_DCHECK_GE(active_streams.size(), num_simulcast_layers);
+ SetupCodec3TL();
+ codec_.numberOfSimulcastStreams = num_simulcast_layers;
+ codec_.simulcastStream[0].numberOfTemporalLayers = 3;
+ codec_.simulcastStream[0].maxBitrate = 500;
+ codec_.simulcastStream[0].targetBitrate = 100;
+ codec_.simulcastStream[0].minBitrate = 10;
+ codec_.simulcastStream[0].active = active_streams[0];
+ }
+
+ void SetupCodec3TL() {
+ codec_.maxBitrate = 0;
+ codec_.VP8()->numberOfTemporalLayers = 3;
+ }
+
+ VideoBitrateAllocation GetAllocation(uint32_t target_bitrate) {
+ return allocator_->Allocate(VideoBitrateAllocationParameters(
+ DataRate::KilobitsPerSec(target_bitrate), kDefaultFrameRate));
+ }
+
+ VideoBitrateAllocation GetAllocation(DataRate target_rate,
+ DataRate stable_rate) {
+ return allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_rate, stable_rate, kDefaultFrameRate));
+ }
+
+ DataRate MinRate(size_t layer_index) const {
+ return DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index].minBitrate);
+ }
+
+ DataRate TargetRate(size_t layer_index) const {
+ return DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index].targetBitrate);
+ }
+
+ DataRate MaxRate(size_t layer_index) const {
+ return DataRate::KilobitsPerSec(
+ codec_.simulcastStream[layer_index].maxBitrate);
+ }
+
+ protected:
+ static const int kDefaultFrameRate = 30;
+ VideoCodec codec_;
+ std::unique_ptr<SimulcastRateAllocator> allocator_;
+};
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastBelowMin) {
+ uint32_t expected[] = {codec_.minBitrate};
+ codec_.active = true;
+ ExpectEqual(expected, GetAllocation(codec_.minBitrate - 1));
+ ExpectEqual(expected, GetAllocation(1));
+ ExpectEqual(expected, GetAllocation(0));
+}
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastAboveMax) {
+ uint32_t expected[] = {codec_.maxBitrate};
+ codec_.active = true;
+ ExpectEqual(expected, GetAllocation(codec_.maxBitrate + 1));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+}
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastNoMax) {
+ const uint32_t kMax = VideoBitrateAllocation::kMaxBitrateBps / 1000;
+ codec_.active = true;
+ codec_.maxBitrate = 0;
+ CreateAllocator();
+
+ uint32_t expected[] = {kMax};
+ ExpectEqual(expected, GetAllocation(kMax));
+}
+
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastWithinLimits) {
+ codec_.active = true;
+ for (uint32_t bitrate = codec_.minBitrate; bitrate <= codec_.maxBitrate;
+ ++bitrate) {
+ uint32_t expected[] = {bitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
+// Tests that when we aren't using simulcast and the codec is marked inactive,
+// no bitrate is allocated.
+TEST_F(SimulcastRateAllocatorTest, NoSimulcastInactive) {
+ codec_.active = false;
+ uint32_t expected[] = {0};
+ CreateAllocator();
+
+ ExpectEqual(expected, GetAllocation(kMinBitrateKbps - 10));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareTargetBitrateKbps));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareMaxBitrateKbps + 10));
+}
+
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastBelowMin) {
+  // With simulcast, use the min bitrate from the simulcast stream settings
+  // instead of the global codec minimum.
+ codec_.numberOfSimulcastStreams = 1;
+ const uint32_t kMin = codec_.minBitrate - 10;
+ codec_.simulcastStream[0].minBitrate = kMin;
+ codec_.simulcastStream[0].targetBitrate = kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].active = true;
+ CreateAllocator();
+
+ uint32_t expected[] = {kMin};
+ ExpectEqual(expected, GetAllocation(kMin - 1));
+ ExpectEqual(expected, GetAllocation(1));
+ ExpectEqual(expected, GetAllocation(0));
+}
+
+TEST_F(SimulcastRateAllocatorTest, SignalsBwLimited) {
+ // Enough to enable all layers.
+ const int kVeryBigBitrate = 100000;
+
+  // All three simulcast streams active, three temporal layers each.
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ EXPECT_TRUE(
+ GetAllocation(codec_.simulcastStream[0].minBitrate - 10).is_bw_limited());
+ EXPECT_TRUE(
+ GetAllocation(codec_.simulcastStream[0].targetBitrate).is_bw_limited());
+ EXPECT_TRUE(GetAllocation(codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].minBitrate)
+ .is_bw_limited());
+ EXPECT_FALSE(
+ GetAllocation(
+ codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ static_cast<uint32_t>(
+ codec_.simulcastStream[2].minBitrate * kDefaultHysteresis + 0.5))
+ .is_bw_limited());
+ EXPECT_FALSE(GetAllocation(kVeryBigBitrate).is_bw_limited());
+}
+
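+// Note on is_bw_limited(), as exercised above: the allocation keeps reporting
+// bandwidth-limited until the rate covers the lower layers' target bitrates
+// plus the top layer's hysteresis-scaled minimum, at which point the top
+// layer can be enabled.
+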
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastAboveMax) {
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ const uint32_t kMax = codec_.simulcastStream[0].maxBitrate + 1000;
+ codec_.simulcastStream[0].maxBitrate = kMax;
+ codec_.simulcastStream[0].active = true;
+ CreateAllocator();
+
+ uint32_t expected[] = {kMax};
+ ExpectEqual(expected, GetAllocation(kMax));
+ ExpectEqual(expected, GetAllocation(kMax + 1));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+}
+
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastWithinLimits) {
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ codec_.simulcastStream[0].targetBitrate = kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[0].active = true;
+ CreateAllocator();
+
+ for (uint32_t bitrate = kMinBitrateKbps;
+ bitrate <= kLegacyScreenshareMaxBitrateKbps; ++bitrate) {
+ uint32_t expected[] = {bitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
+TEST_F(SimulcastRateAllocatorTest, Regular3TLTemporalRateAllocation) {
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ const VideoBitrateAllocation alloc = GetAllocation(kMinBitrateKbps);
+ // 40/20/40.
+ EXPECT_EQ(static_cast<uint32_t>(0.4 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.2 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 1) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.4 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 2) / 1000);
+}
+
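+// Worked numbers for the 40/20/40 split checked above: if a stream is given
+// 100 kbps, TL0 gets 40 kbps, TL1 20 kbps and TL2 40 kbps, so decoding TL0,
+// TL0+TL1 or all three layers yields 40, 60 and 100 kbps respectively.
+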
+TEST_F(SimulcastRateAllocatorTest, BaseHeavy3TLTemporalRateAllocation) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-UseBaseHeavyVP8TL3RateAllocation/Enabled/");
+
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ const VideoBitrateAllocation alloc = GetAllocation(kMinBitrateKbps);
+ // 60/20/20.
+ EXPECT_EQ(static_cast<uint32_t>(0.6 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.2 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 1) / 1000);
+ EXPECT_EQ(static_cast<uint32_t>(0.2 * kMinBitrateKbps),
+ alloc.GetBitrate(0, 2) / 1000);
+}
+
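+// With the base-heavy field trial enabled above, the same 100 kbps stream
+// would instead split 60/20/20, shifting bitrate from the highest temporal
+// layer to the base layer.
+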
+TEST_F(SimulcastRateAllocatorTest, SingleSimulcastInactive) {
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ codec_.simulcastStream[0].targetBitrate = kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[0].active = false;
+ CreateAllocator();
+
+ uint32_t expected[] = {0};
+ ExpectEqual(expected, GetAllocation(kMinBitrateKbps - 10));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareTargetBitrateKbps));
+ ExpectEqual(expected, GetAllocation(kLegacyScreenshareMaxBitrateKbps + 10));
+}
+
+TEST_F(SimulcastRateAllocatorTest, OneToThreeStreams) {
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+ {
+ // Single stream, min bitrate.
+ const uint32_t bitrate = codec_.simulcastStream[0].minBitrate;
+ uint32_t expected[] = {bitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Single stream at target bitrate.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate;
+ uint32_t expected[] = {bitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ uint32_t kMinInitialRateTwoLayers =
+ codec_.simulcastStream[0].targetBitrate +
+ static_cast<uint32_t>(codec_.simulcastStream[1].minBitrate *
+ kDefaultHysteresis);
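+  // For example, with the fixture values above (targetBitrate 100 kbps for
+  // stream 0, minBitrate 50 kbps for stream 1) and assuming kDefaultHysteresis
+  // is 1.1, this threshold works out to 100 + 55 = 155 kbps.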
+ {
+ // Bitrate above target for first stream, but below min for the next one.
+ const uint32_t bitrate = kMinInitialRateTwoLayers - 1;
+ uint32_t expected[] = {bitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Just enough for two streams.
+ const uint32_t bitrate = kMinInitialRateTwoLayers;
+ uint32_t expected[] = {
+ codec_.simulcastStream[0].targetBitrate,
+ kMinInitialRateTwoLayers - codec_.simulcastStream[0].targetBitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Second stream maxed out, but not enough for third.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].maxBitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ uint32_t kMinInitialRateThreeLayers =
+ codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ static_cast<uint32_t>(codec_.simulcastStream[2].minBitrate *
+ kDefaultHysteresis);
+ {
+ // First two streams maxed out, but not enough for third. Nowhere to put
+ // remaining bits.
+ const uint32_t bitrate = kMinInitialRateThreeLayers - 1;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].maxBitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Just enough for all three streams.
+ const uint32_t bitrate = kMinInitialRateThreeLayers;
+ uint32_t expected[] = {
+ codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].targetBitrate,
+ static_cast<uint32_t>(codec_.simulcastStream[2].minBitrate *
+ kDefaultHysteresis)};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Third maxed out.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].targetBitrate,
+ codec_.simulcastStream[2].maxBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Enough to max out all streams which will allocate the target amount to
+ // the lower streams.
+ const uint32_t bitrate = codec_.simulcastStream[0].maxBitrate +
+ codec_.simulcastStream[1].maxBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].targetBitrate,
+ codec_.simulcastStream[2].maxBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
+// If all three simulcast streams are inactive, none of them should be
+// allocated any bitrate.
+TEST_F(SimulcastRateAllocatorTest, ThreeStreamsInactive) {
+ SetupCodec3SL3TL({false, false, false});
+ CreateAllocator();
+
+ // Just enough to allocate the min.
+ const uint32_t min_bitrate = codec_.simulcastStream[0].minBitrate +
+ codec_.simulcastStream[1].minBitrate +
+ codec_.simulcastStream[2].minBitrate;
+ // Enough bitrate to allocate target to all streams.
+ const uint32_t target_bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ codec_.simulcastStream[2].targetBitrate;
+ // Enough bitrate to allocate max to all streams.
+ const uint32_t max_bitrate = codec_.simulcastStream[0].maxBitrate +
+ codec_.simulcastStream[1].maxBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {0, 0, 0};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(min_bitrate));
+ ExpectEqual(expected, GetAllocation(target_bitrate));
+ ExpectEqual(expected, GetAllocation(max_bitrate));
+}
+
+// If there are two simulcast streams, we expect the high active stream to be
+// allocated bitrate as if it were a single active stream.
+TEST_F(SimulcastRateAllocatorTest, TwoStreamsLowInactive) {
+ SetupCodec2SL3TL({false, true});
+ CreateAllocator();
+
+ const uint32_t kActiveStreamMinBitrate = codec_.simulcastStream[1].minBitrate;
+ const uint32_t kActiveStreamTargetBitrate =
+ codec_.simulcastStream[1].targetBitrate;
+ const uint32_t kActiveStreamMaxBitrate = codec_.simulcastStream[1].maxBitrate;
+ {
+ // Expect that the stream is always allocated its min bitrate.
+ uint32_t expected[] = {0, kActiveStreamMinBitrate};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate - 10));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate));
+ }
+
+ {
+ // The stream should be allocated its target bitrate.
+ uint32_t expected[] = {0, kActiveStreamTargetBitrate};
+ ExpectEqual(expected, GetAllocation(kActiveStreamTargetBitrate));
+ }
+
+ {
+ // The stream should be allocated its max if the target input is sufficient.
+ uint32_t expected[] = {0, kActiveStreamMaxBitrate};
+ ExpectEqual(expected, GetAllocation(kActiveStreamMaxBitrate));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+ }
+}
+
+// If there are two simulcast streams, we expect the low active stream to be
+// allocated bitrate as if it were a single active stream.
+TEST_F(SimulcastRateAllocatorTest, TwoStreamsHighInactive) {
+ SetupCodec2SL3TL({true, false});
+ CreateAllocator();
+
+ const uint32_t kActiveStreamMinBitrate = codec_.simulcastStream[0].minBitrate;
+ const uint32_t kActiveStreamTargetBitrate =
+ codec_.simulcastStream[0].targetBitrate;
+ const uint32_t kActiveStreamMaxBitrate = codec_.simulcastStream[0].maxBitrate;
+ {
+ // Expect that the stream is always allocated its min bitrate.
+ uint32_t expected[] = {kActiveStreamMinBitrate, 0};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate - 10));
+ ExpectEqual(expected, GetAllocation(kActiveStreamMinBitrate));
+ }
+
+ {
+ // The stream should be allocated its target bitrate.
+ uint32_t expected[] = {kActiveStreamTargetBitrate, 0};
+ ExpectEqual(expected, GetAllocation(kActiveStreamTargetBitrate));
+ }
+
+ {
+    // The stream should be allocated its max if the target input is sufficient.
+ uint32_t expected[] = {kActiveStreamMaxBitrate, 0};
+ ExpectEqual(expected, GetAllocation(kActiveStreamMaxBitrate));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+ }
+}
+
+// If there are three simulcast streams and the middle stream is inactive, the
+// other two streams should be allocated bitrate as if they were the only two
+// active simulcast streams.
+TEST_F(SimulcastRateAllocatorTest, ThreeStreamsMiddleInactive) {
+ SetupCodec3SL3TL({true, false, true});
+ CreateAllocator();
+
+ {
+ const uint32_t kLowStreamMinBitrate = codec_.simulcastStream[0].minBitrate;
+ // The lowest stream should always be allocated its minimum bitrate.
+ uint32_t expected[] = {kLowStreamMinBitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(0));
+ ExpectEqual(expected, GetAllocation(kLowStreamMinBitrate - 10));
+ ExpectEqual(expected, GetAllocation(kLowStreamMinBitrate));
+ }
+
+ {
+ // The lowest stream gets its target bitrate.
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, 0, 0};
+ ExpectEqual(expected,
+ GetAllocation(codec_.simulcastStream[0].targetBitrate));
+ }
+
+ {
+ // The lowest stream gets its max bitrate, but not enough for the high
+ // stream.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[2].minBitrate - 1;
+ uint32_t expected[] = {codec_.simulcastStream[0].maxBitrate, 0, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Both active streams get allocated target bitrate.
+ const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[2].targetBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, 0,
+ codec_.simulcastStream[2].targetBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Lowest stream gets its target bitrate, high stream gets its max bitrate.
+ uint32_t bitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, 0,
+ codec_.simulcastStream[2].maxBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ ExpectEqual(expected, GetAllocation(bitrate + 10));
+ ExpectEqual(expected, GetAllocation(std::numeric_limits<uint32_t>::max()));
+ }
+}
+
+TEST_F(SimulcastRateAllocatorTest, NonConferenceModeScreenshare) {
+ codec_.mode = VideoCodecMode::kScreensharing;
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+  // Make sure we have enough bitrate for all 3 simulcast layers.
+ const uint32_t bitrate = codec_.simulcastStream[0].maxBitrate +
+ codec_.simulcastStream[1].maxBitrate +
+ codec_.simulcastStream[2].maxBitrate;
+ const VideoBitrateAllocation alloc = GetAllocation(bitrate);
+
+ EXPECT_EQ(alloc.GetTemporalLayerAllocation(0).size(), 3u);
+ EXPECT_EQ(alloc.GetTemporalLayerAllocation(1).size(), 3u);
+ EXPECT_EQ(alloc.GetTemporalLayerAllocation(2).size(), 3u);
+}
+
+TEST_F(SimulcastRateAllocatorTest, StableRate) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-StableTargetRate/"
+ "enabled:true,"
+ "video_hysteresis_factor:1.1/");
+
+ SetupCodec3SL3TL({true, true, true});
+ CreateAllocator();
+
+  // Let the volatile rate always be enough for all streams; in this test we
+  // are only interested in how the stable rate affects enablement.
+ const DataRate volatile_rate =
+ (TargetRate(0) + TargetRate(1) + MinRate(2)) * 1.1;
+
+ {
+ // On the first call to a new SimulcastRateAllocator instance, hysteresis
+ // is disabled, but stable rate still caps layers.
+ uint32_t expected[] = {TargetRate(0).kbps<uint32_t>(),
+ MaxRate(1).kbps<uint32_t>()};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate, TargetRate(0) + MinRate(1)));
+ }
+
+ {
+ // Let stable rate go to a bitrate below what is needed for two streams.
+ uint32_t expected[] = {MaxRate(0).kbps<uint32_t>(), 0};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate, TargetRate(0) + MinRate(1) -
+ DataRate::BitsPerSec(1)));
+ }
+
+ {
+ // Don't enable stream as we need to get up above hysteresis threshold.
+ uint32_t expected[] = {MaxRate(0).kbps<uint32_t>(), 0};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate, TargetRate(0) + MinRate(1)));
+ }
+
+ {
+ // Above threshold with hysteresis, enable second stream.
+ uint32_t expected[] = {TargetRate(0).kbps<uint32_t>(),
+ MaxRate(1).kbps<uint32_t>()};
+ ExpectEqual(expected, GetAllocation(volatile_rate,
+ (TargetRate(0) + MinRate(1)) * 1.1));
+ }
+
+ {
+    // Enough to enable all three layers.
+ uint32_t expected[] = {
+ TargetRate(0).kbps<uint32_t>(), TargetRate(1).kbps<uint32_t>(),
+ (volatile_rate - TargetRate(0) - TargetRate(1)).kbps<uint32_t>()};
+ ExpectEqual(expected, GetAllocation(volatile_rate, volatile_rate));
+ }
+
+ {
+ // Drop hysteresis, all three still on.
+ uint32_t expected[] = {
+ TargetRate(0).kbps<uint32_t>(), TargetRate(1).kbps<uint32_t>(),
+ (volatile_rate - TargetRate(0) - TargetRate(1)).kbps<uint32_t>()};
+ ExpectEqual(expected,
+ GetAllocation(volatile_rate,
+ TargetRate(0) + TargetRate(1) + MinRate(2)));
+ }
+}
+
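+// The StableRate test above relies on the allocator using the stable rate
+// only for deciding which layers to enable, while the volatile (target) rate
+// determines how much bitrate the enabled layers actually receive.
+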
+class ScreenshareRateAllocationTest : public SimulcastRateAllocatorTest {
+ public:
+ void SetupConferenceScreenshare(bool use_simulcast, bool active = true) {
+ codec_.mode = VideoCodecMode::kScreensharing;
+ codec_.minBitrate = kMinBitrateKbps;
+ codec_.maxBitrate =
+ kLegacyScreenshareMaxBitrateKbps + kSimulcastScreenshareMaxBitrateKbps;
+ if (use_simulcast) {
+ codec_.numberOfSimulcastStreams = 2;
+ codec_.simulcastStream[0].minBitrate = kMinBitrateKbps;
+ codec_.simulcastStream[0].targetBitrate =
+ kLegacyScreenshareTargetBitrateKbps;
+ codec_.simulcastStream[0].maxBitrate = kLegacyScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[0].numberOfTemporalLayers = 2;
+ codec_.simulcastStream[0].active = active;
+
+ codec_.simulcastStream[1].minBitrate =
+ kSimulcastScreenshareMinBitrateKbps;
+ codec_.simulcastStream[1].targetBitrate =
+ kSimulcastScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[1].maxBitrate =
+ kSimulcastScreenshareMaxBitrateKbps;
+ codec_.simulcastStream[1].numberOfTemporalLayers = 2;
+ codec_.simulcastStream[1].active = active;
+ } else {
+ codec_.numberOfSimulcastStreams = 0;
+ codec_.VP8()->numberOfTemporalLayers = 2;
+ codec_.active = active;
+ }
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(ScreenshareTest,
+ ScreenshareRateAllocationTest,
+ ::testing::Bool());
+
+TEST_P(ScreenshareRateAllocationTest, ConferenceBitrateBelowTl0) {
+ SetupConferenceScreenshare(GetParam());
+ CreateAllocator(true);
+
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ kLegacyScreenshareTargetBitrateKbps * 1000, kFramerateFps));
+
+ // All allocation should go in TL0.
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps, allocation.get_sum_kbps());
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(allocation.is_bw_limited(), GetParam());
+}
+
+TEST_P(ScreenshareRateAllocationTest, ConferenceBitrateAboveTl0) {
+ SetupConferenceScreenshare(GetParam());
+ CreateAllocator(true);
+
+ uint32_t target_bitrate_kbps =
+ (kLegacyScreenshareTargetBitrateKbps + kLegacyScreenshareMaxBitrateKbps) /
+ 2;
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_bitrate_kbps * 1000, kFramerateFps));
+
+ // Fill TL0, then put the rest in TL1.
+ EXPECT_EQ(target_bitrate_kbps, allocation.get_sum_kbps());
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(target_bitrate_kbps - kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 1) / 1000);
+ EXPECT_EQ(allocation.is_bw_limited(), GetParam());
+}
+
+TEST_F(ScreenshareRateAllocationTest, ConferenceBitrateAboveTl1) {
+ // This test is only for the non-simulcast case.
+ SetupConferenceScreenshare(false);
+ CreateAllocator(true);
+
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ kLegacyScreenshareMaxBitrateKbps * 2000, kFramerateFps));
+
+ // Fill both TL0 and TL1, but no more.
+ EXPECT_EQ(kLegacyScreenshareMaxBitrateKbps, allocation.get_sum_kbps());
+ EXPECT_EQ(kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 0) / 1000);
+ EXPECT_EQ(
+ kLegacyScreenshareMaxBitrateKbps - kLegacyScreenshareTargetBitrateKbps,
+ allocation.GetBitrate(0, 1) / 1000);
+ EXPECT_FALSE(allocation.is_bw_limited());
+}
+
+// Tests that when the screenshare is inactive, all layers are allocated zero
+// bitrate.
+TEST_P(ScreenshareRateAllocationTest, InactiveScreenshare) {
+ SetupConferenceScreenshare(GetParam(), false);
+ CreateAllocator();
+
+ // Enough bitrate for TL0 and TL1.
+ uint32_t target_bitrate_kbps =
+ (kLegacyScreenshareTargetBitrateKbps + kLegacyScreenshareMaxBitrateKbps) /
+ 2;
+ VideoBitrateAllocation allocation =
+ allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_bitrate_kbps * 1000, kFramerateFps));
+
+ EXPECT_EQ(0U, allocation.get_sum_kbps());
+}
+
+TEST_F(ScreenshareRateAllocationTest, Hysteresis) {
+ // This test is only for the simulcast case.
+ SetupConferenceScreenshare(true);
+ CreateAllocator();
+
+ // The bitrate at which we would normally enable the upper simulcast stream.
+  const uint32_t default_enable_rate_kbps =
+      codec_.simulcastStream[0].targetBitrate +
+      codec_.simulcastStream[1].minBitrate;
+  const uint32_t enable_rate_with_hysteresis_kbps =
+      (default_enable_rate_kbps * 135) / 100;
+
+ {
+ // On the first call to a new SimulcastRateAllocator instance, hysteresis
+ // is disabled.
+    const uint32_t bitrate = default_enable_rate_kbps;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].minBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Go down to a bitrate below what is needed for two streams.
+    const uint32_t bitrate = default_enable_rate_kbps - 1;
+ uint32_t expected[] = {bitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Don't enable stream as we need to get up above hysteresis threshold.
+    const uint32_t bitrate = default_enable_rate_kbps;
+ uint32_t expected[] = {bitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Above threshold, enable second stream.
+    const uint32_t bitrate = enable_rate_with_hysteresis_kbps;
+    uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+                           enable_rate_with_hysteresis_kbps -
+                               codec_.simulcastStream[0].targetBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Go down again, still keep the second stream alive.
+    const uint32_t bitrate = default_enable_rate_kbps;
+ uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate,
+ codec_.simulcastStream[1].minBitrate};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Go down below default enable, second stream is shut down again.
+    const uint32_t bitrate = default_enable_rate_kbps - 1;
+ uint32_t expected[] = {bitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+
+ {
+ // Go up, hysteresis is blocking us again.
+    const uint32_t bitrate = default_enable_rate_kbps;
+ uint32_t expected[] = {bitrate, 0};
+ ExpectEqual(expected, GetAllocation(bitrate));
+ }
+}
+
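+// In the Hysteresis test above, re-enabling the upper stream requires 35%
+// headroom: per the (rate * 135) / 100 computation, a stream dropped at the
+// default enable rate only comes back once the rate reaches 1.35 times that
+// value.
+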
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
new file mode 100644
index 0000000000..35224b17ed
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_test_fixture_impl.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Return;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const int kDefaultWidth = 1280;
+const int kDefaultHeight = 720;
+const int kNumberOfSimulcastStreams = 3;
+const int kColorY = 66;
+const int kColorU = 22;
+const int kColorV = 33;
+const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
+const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
+const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
+const float kMaxFramerates[kNumberOfSimulcastStreams] = {30, 30, 30};
+const int kScaleResolutionDownBy[kNumberOfSimulcastStreams] = {4, 2, 1};
+const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
+const int kNoTemporalLayerProfile[3] = {0, 0, 0};
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities, 1, 1200);
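+// A reading aid for kSettings above (argument meanings assumed from the
+// VideoEncoder::Settings constructor): capabilities with loss notification
+// disabled, one encoder core, and a 1200-byte max payload size.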
+
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
+ expected_values[0] = value0;
+ expected_values[1] = value1;
+ expected_values[2] = value2;
+}
+
+enum PlaneType {
+ kYPlane = 0,
+ kUPlane = 1,
+ kVPlane = 2,
+ kNumOfPlanes = 3,
+};
+
+} // namespace
+
+class SimulcastTestFixtureImpl::TestEncodedImageCallback
+ : public EncodedImageCallback {
+ public:
+ TestEncodedImageCallback() {
+ memset(temporal_layer_, -1, sizeof(temporal_layer_));
+ memset(layer_sync_, false, sizeof(layer_sync_));
+ }
+
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
+ bool is_h264 = (codec_specific_info->codecType == kVideoCodecH264);
+ // Only store the base layer.
+ if (encoded_image.SpatialIndex().value_or(0) == 0) {
+ if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
+ encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create(
+ encoded_image.data(), encoded_image.size()));
+ encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
+ } else {
+ encoded_frame_.SetEncodedData(EncodedImageBuffer::Create(
+ encoded_image.data(), encoded_image.size()));
+ }
+ }
+ if (is_vp8) {
+ layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.VP8.layerSync;
+ temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.VP8.temporalIdx;
+ } else if (is_h264) {
+ layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.H264.base_layer_sync;
+ temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
+ codec_specific_info->codecSpecific.H264.temporal_idx;
+ }
+ return Result(Result::OK, encoded_image.Timestamp());
+ }
+ // This method only makes sense for VP8.
+ void GetLastEncodedFrameInfo(int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
+ *temporal_layer = temporal_layer_[stream];
+ *layer_sync = layer_sync_[stream];
+ }
+ void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
+ *encoded_key_frame = encoded_key_frame_;
+ }
+ void GetLastEncodedFrame(EncodedImage* encoded_frame) {
+ *encoded_frame = encoded_frame_;
+ }
+
+ private:
+ EncodedImage encoded_key_frame_;
+ EncodedImage encoded_frame_;
+ int temporal_layer_[kNumberOfSimulcastStreams];
+ bool layer_sync_[kNumberOfSimulcastStreams];
+};
+
+class SimulcastTestFixtureImpl::TestDecodedImageCallback
+ : public DecodedImageCallback {
+ public:
+ TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
+ rtc::scoped_refptr<I420BufferInterface> i420_buffer =
+ decoded_image.video_frame_buffer()->ToI420();
+ for (int i = 0; i < decoded_image.width(); ++i) {
+ EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
+ }
+
+ // TODO(mikhal): Verify the difference between U,V and the original.
+ for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
+ EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
+ EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
+ }
+ decoded_frames_++;
+ return 0;
+ }
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+ }
+ void Decoded(VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ Decoded(decoded_image);
+ }
+ int DecodedFrames() { return decoded_frames_; }
+
+ private:
+ int decoded_frames_;
+};
+
+namespace {
+
+void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
+ for (int i = 0; i < height; i++, data += stride) {
+    // Fill the visible width with `value` and zero the padding between width
+    // and stride; this makes image content easy to distinguish from stride
+    // padding when verifying frame handling.
+ memset(data, value, width);
+ memset(data + width, 0, stride - width);
+ }
+}
+
+// Fills in an I420Buffer from `plane_colors`.
+void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
+ int plane_colors[kNumOfPlanes]) {
+ SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
+ buffer->height(), buffer->StrideY());
+
+ SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
+ buffer->ChromaHeight(), buffer->StrideU());
+
+ SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
+ buffer->ChromaHeight(), buffer->StrideV());
+}
+
+void ConfigureStream(int width,
+ int height,
+ int max_bitrate,
+ int min_bitrate,
+ int target_bitrate,
+ float max_framerate,
+ SimulcastStream* stream,
+ int num_temporal_layers) {
+ RTC_DCHECK(stream);
+ stream->width = width;
+ stream->height = height;
+ stream->maxBitrate = max_bitrate;
+ stream->minBitrate = min_bitrate;
+ stream->targetBitrate = target_bitrate;
+ stream->maxFramerate = max_framerate;
+ if (num_temporal_layers >= 0) {
+ stream->numberOfTemporalLayers = num_temporal_layers;
+ }
+ stream->qpMax = 45;
+ stream->active = true;
+}
+
+} // namespace
+
+void SimulcastTestFixtureImpl::DefaultSettings(
+ VideoCodec* settings,
+ const int* temporal_layer_profile,
+ VideoCodecType codec_type,
+ bool reverse_layer_order) {
+ RTC_CHECK(settings);
+ *settings = {};
+ settings->codecType = codec_type;
+ settings->startBitrate = 300;
+ settings->minBitrate = 30;
+ settings->maxBitrate = 0;
+ settings->maxFramerate = 30;
+ settings->width = kDefaultWidth;
+ settings->height = kDefaultHeight;
+ settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
+ settings->active = true;
+ ASSERT_EQ(3, kNumberOfSimulcastStreams);
+ int layer_order[3] = {0, 1, 2};
+ if (reverse_layer_order) {
+ layer_order[0] = 2;
+ layer_order[2] = 0;
+ }
+ settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
+ kDefaultOutlierFrameSizePercent};
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0], kMaxFramerates[0],
+ &settings->simulcastStream[layer_order[0]],
+ temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1], kMaxFramerates[1],
+ &settings->simulcastStream[layer_order[1]],
+ temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2], kMaxFramerates[2],
+ &settings->simulcastStream[layer_order[2]],
+ temporal_layer_profile[2]);
+ settings->SetFrameDropEnabled(true);
+ if (codec_type == kVideoCodecVP8) {
+ settings->VP8()->denoisingOn = true;
+ settings->VP8()->automaticResizeOn = false;
+ settings->VP8()->keyFrameInterval = 3000;
+ } else {
+ settings->H264()->keyFrameInterval = 3000;
+ }
+}
+
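+// The default three-stream ladder configured above: quarter, half and full
+// resolution (relative to kDefaultWidth x kDefaultHeight), with per-stream
+// max bitrates of 150/600/1200 kbps and 30 fps on every stream.
+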
+SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
+ std::unique_ptr<VideoEncoderFactory> encoder_factory,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ SdpVideoFormat video_format)
+ : codec_type_(PayloadStringToCodecType(video_format.name)) {
+ encoder_ = encoder_factory->CreateVideoEncoder(video_format);
+ decoder_ = decoder_factory->CreateVideoDecoder(video_format);
+ SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264)
+ ? kDefaultTemporalLayerProfile
+ : kNoTemporalLayerProfile);
+}
+
+SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
+ encoder_->Release();
+ decoder_->Release();
+}
+
+void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
+ DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+ VideoDecoder::Settings decoder_settings;
+ decoder_settings.set_max_render_resolution({kDefaultWidth, kDefaultHeight});
+ decoder_settings.set_codec_type(codec_type_);
+ EXPECT_TRUE(decoder_->Configure(decoder_settings));
+ input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
+ input_buffer_->InitializeData();
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+}
+
+void SimulcastTestFixtureImpl::SetUpRateAllocator() {
+ rate_allocator_.reset(new SimulcastRateAllocator(settings_));
+}
+
+void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(bitrate_kbps * 1000, fps)),
+ static_cast<double>(fps)));
+}
+
+void SimulcastTestFixtureImpl::RunActiveStreamsTest(
+ const std::vector<bool> active_streams) {
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ UpdateActiveStreams(active_streams);
+ // Set sufficient bitrate for all streams so we can test active without
+ // bitrate being an issue.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
+
+ ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::UpdateActiveStreams(
+ const std::vector<bool> active_streams) {
+ ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
+ for (size_t i = 0; i < active_streams.size(); ++i) {
+ settings_.simulcastStream[i].active = active_streams[i];
+ }
+  // Re-initialize the allocator and encoder with the new settings.
+ // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
+ // reconfiguration of the allocator and encoder. When the video bitrate
+ // allocator has support for updating active streams without a
+ // reinitialization, we can just call that here instead.
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+}
+
+void SimulcastTestFixtureImpl::ExpectStream(VideoFrameType frame_type,
+ int scaleResolutionDownBy) {
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth,
+ kDefaultWidth / scaleResolutionDownBy),
+ Field(&EncodedImage::_encodedHeight,
+ kDefaultHeight / scaleResolutionDownBy)),
+ _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+}
+
+void SimulcastTestFixtureImpl::ExpectStreams(
+ VideoFrameType frame_type,
+ const std::vector<bool> expected_streams_active) {
+ ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
+ kNumberOfSimulcastStreams);
+ for (size_t i = 0; i < kNumberOfSimulcastStreams; i++) {
+ if (expected_streams_active[i]) {
+ ExpectStream(frame_type, kScaleResolutionDownBy[i]);
+ }
+ }
+}
+
+void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
+ int expected_video_streams) {
+ ASSERT_GE(expected_video_streams, 0);
+ ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
+ std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
+ for (int i = 0; i < expected_video_streams; ++i) {
+ expected_streams_active[i] = true;
+ }
+ ExpectStreams(frame_type, expected_streams_active);
+}
+
+void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ TestEncodedImageCallback* encoder_callback,
+ const int* expected_temporal_idx,
+ const bool* expected_layer_sync,
+ int num_spatial_layers) {
+ int temporal_layer = -1;
+ bool layer_sync = false;
+ for (int i = 0; i < num_spatial_layers; i++) {
+ encoder_callback->GetLastEncodedFrameInfo(&temporal_layer, &layer_sync, i);
+ EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
+ EXPECT_EQ(expected_layer_sync[i], layer_sync);
+ }
+}
+
+// For some codecs (VP8), all active streams are expected to generate a key
+// frame even though a key frame was only requested for some of them.
+void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ frame_types[0] = VideoFrameType::kVideoFrameKey;
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[1] = VideoFrameType::kVideoFrameKey;
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[2] = VideoFrameType::kVideoFrameKey;
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+// For some codecs (H264), only the streams for which a key frame was
+// requested are expected to generate one.
+void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnSpecificStreams() {
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ frame_types[0] = VideoFrameType::kVideoFrameKey;
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[0]);
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[1]);
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[2]);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[1] = VideoFrameType::kVideoFrameKey;
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[0]);
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[1]);
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[2]);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[2] = VideoFrameType::kVideoFrameKey;
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[0]);
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[1]);
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[2]);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ frame_types[0] = VideoFrameType::kVideoFrameKey;
+ frame_types[2] = VideoFrameType::kVideoFrameKey;
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[0]);
+ ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[1]);
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[2]);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameKey);
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[0]);
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[1]);
+ ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[2]);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(),
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
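+// The padding tests below walk the rate ladder: each higher stream is enabled
+// only once the rate covers the lower streams' target bitrates plus its own
+// minimum, while the base stream is encoded even below its minimum.
+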
+void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
+ // We should always encode the base layer.
+ SetRates(kMinBitrates[0] - 1, 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
+ // We have just enough to get only the first stream and padding for two.
+ SetRates(kMinBitrates[0], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
+ // We are just below limit of sending second stream, so we should get
+ // the first stream maxed out (at `maxBitrate`), and padding for two.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingOneStream() {
+ // We have just enough to send two streams, so padding for one stream.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
+ // We are just below limit of sending third stream, so we should get
+ // first stream's rate maxed out at `targetBitrate`, second at `maxBitrate`.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestSendAllStreams() {
+ // We have just enough to send all streams.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestDisablingStreams() {
+ // We should get three media streams.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should only get two streams and padding for one.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should only get the first stream and padding for two.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We don't have enough bitrate for the thumbnail stream, but we should get
+ // it anyway with current configuration.
+ SetRates(kTargetBitrates[0] - 1, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should only get two streams and padding for one.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
+ // We get a key frame because a new stream is being enabled.
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // We should get all three streams.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
+ // We get a key frame because a new stream is being enabled.
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestActiveStreams() {
+ // All streams on.
+ RunActiveStreamsTest({true, true, true});
+ // All streams off.
+ RunActiveStreamsTest({false, false, false});
+ // Low stream off.
+ RunActiveStreamsTest({false, true, true});
+ // Middle stream off.
+ RunActiveStreamsTest({true, false, true});
+ // High stream off.
+ RunActiveStreamsTest({true, true, false});
+ // Only low stream turned on.
+ RunActiveStreamsTest({true, false, false});
+ // Only middle stream turned on.
+ RunActiveStreamsTest({false, true, false});
+ // Only high stream turned on.
+ RunActiveStreamsTest({false, false, true});
+}
+
+void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
+ const int* temporal_layer_profile = nullptr;
+ // Disable all streams except the last and set the bitrate of the last to
+ // 100 kbps. This verifies the way GTP switches to screenshare mode.
+ if (codec_type_ == kVideoCodecVP8) {
+ settings_.VP8()->numberOfTemporalLayers = 1;
+ temporal_layer_profile = kDefaultTemporalLayerProfile;
+ } else {
+ settings_.H264()->numberOfTemporalLayers = 1;
+ temporal_layer_profile = kNoTemporalLayerProfile;
+ }
+ settings_.maxBitrate = 100;
+ settings_.startBitrate = 100;
+ settings_.width = width;
+ settings_.height = height;
+ for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
+ settings_.simulcastStream[i].maxBitrate = 0;
+ settings_.simulcastStream[i].width = settings_.width;
+ settings_.simulcastStream[i].height = settings_.height;
+ settings_.simulcastStream[i].numberOfTemporalLayers = 1;
+ }
+ // Setting input image to new resolution.
+ input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
+ input_buffer_->InitializeData();
+
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+
+ // The for loop above did not set the bitrate of the highest layer.
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
+ 0;
+ // The highest layer has to correspond to the non-simulcast resolution.
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+
+ // Encode one frame and verify.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
+ std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+ VideoFrameType::kVideoFrameDelta);
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
+ VideoFrameType::kVideoFrameKey),
+ Field(&EncodedImage::_encodedWidth, width),
+ Field(&EncodedImage::_encodedHeight, height)),
+ _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+
+ // Switch back.
+ DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
+ // Start at the lowest bitrate for enabling base stream.
+ settings_.startBitrate = kMinBitrates[0];
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
+ SetRates(settings_.startBitrate, 30);
+ ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
+ // Resize `input_frame_` to the new resolution.
+ input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
+ input_buffer_->InitializeData();
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
+ SwitchingToOneStream(1024, 768);
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
+ SwitchingToOneStream(1023, 769);
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
+ SwitchingToOneStream(4, 4);
+}
+
+// Test the layer pattern and sync flag for various spatial-temporal patterns.
+// 3-3-3 pattern: 3 temporal layers for all spatial streams, so the same
+// temporal_layer id and layer_sync flag are expected for all streams.
+void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
+ bool is_h264 = codec_type_ == kVideoCodecH264;
+ TestEncodedImageCallback encoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+
+ int expected_temporal_idx[3] = {-1, -1, -1};
+ bool expected_layer_sync[3] = {false, false, false};
+
+ // First frame: #0.
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
+ SetExpectedValues3<bool>(!is_h264, !is_h264, !is_h264, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #1.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #2.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #3.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #4.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #5.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(is_h264, is_h264, is_h264, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+}
+
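+// For reference, the 3-3-3 sequence verified above cycles temporal indices
+// 0, 2, 1, 2, 0, 2 across all streams; for VP8 the layer_sync flag is
+// expected on the first three frames (the key frame and the first frame of
+// each enhancement layer) and false afterwards.
+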
+// Test the layer pattern and sync flag for various spatial-temporal patterns.
+// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
+// 1 temporal layer for highest resolution.
+// For this profile, we expect the temporal index pattern to be:
+// 1st stream: 0, 2, 1, 2, ....
+// 2nd stream: 0, 1, 0, 1, ...
+// 3rd stream: -1, -1, -1, -1, ....
+// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
+// should always have temporal layer idx set to kNoTemporalIdx = -1.
+// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
+// TODO(marpan): Although this seems safe for now, we should fix this.
+void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
+ EXPECT_EQ(codec_type_, kVideoCodecVP8);
+ int temporal_layer_profile[3] = {3, 2, 1};
+ SetUpCodec(temporal_layer_profile);
+ TestEncodedImageCallback encoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+
+ int expected_temporal_idx[3] = {-1, -1, -1};
+ bool expected_layer_sync[3] = {false, false, false};
+
+ // First frame: #0.
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #1.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #2.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #3.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #4.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #5.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+}
+
+void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
+ TestEncodedImageCallback encoder_callback;
+ TestDecodedImageCallback decoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
+
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+  // Set up two (possibly) problematic use cases for stride:
+  // 1. stride > width, 2. stride_uv != stride_y / 2.
+ int stride_y = kDefaultWidth + 20;
+ int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
+ input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
+ input_frame_ = std::make_unique<webrtc::VideoFrame>(
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer_)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build());
+
+ // Set color.
+ int plane_offset[kNumOfPlanes];
+ plane_offset[kYPlane] = kColorY;
+ plane_offset[kUPlane] = kColorU;
+ plane_offset[kVPlane] = kColorV;
+ CreateImage(input_buffer_, plane_offset);
+
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+
+ // Change color.
+ plane_offset[kYPlane] += 1;
+ plane_offset[kUPlane] += 1;
+ plane_offset[kVPlane] += 1;
+ CreateImage(input_buffer_, plane_offset);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+
+ EncodedImage encoded_frame;
+  // Only the first encoded frame is a key frame, so that is the one
+  // retrieved here.
+ encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0));
+ encoder_callback.GetLastEncodedFrame(&encoded_frame);
+ decoder_->Decode(encoded_frame, false, 0);
+ EXPECT_EQ(2, decoder_callback.DecodedFrames());
+}
+
+void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
+ MockEncodedImageCallback encoder_callback;
+ MockDecodedImageCallback decoder_callback;
+
+ EncodedImage encoded_frame[3];
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
+
+ EXPECT_CALL(encoder_callback, OnEncodedImage(_, _))
+ .Times(3)
+ .WillRepeatedly(
+ ::testing::Invoke([&](const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
+
+ size_t index = encoded_image.SpatialIndex().value_or(0);
+ encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create(
+ encoded_image.data(), encoded_image.size()));
+ encoded_frame[index]._frameType = encoded_image._frameType;
+ return EncodedImageCallback::Result(
+ EncodedImageCallback::Result::OK, 0);
+ }));
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
+
+ EXPECT_CALL(decoder_callback, Decoded(_, _, _))
+ .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
+ EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
+ }));
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0));
+
+ EXPECT_CALL(decoder_callback, Decoded(_, _, _))
+ .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
+ EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
+ }));
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0));
+
+ EXPECT_CALL(decoder_callback, Decoded(_, _, _))
+ .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ EXPECT_EQ(decodedImage.width(), kDefaultWidth);
+ EXPECT_EQ(decodedImage.height(), kDefaultHeight);
+ }));
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0));
+}
+
+void SimulcastTestFixtureImpl::
+ TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() {
+ VideoEncoder::EncoderInfo encoder_info = encoder_->GetEncoderInfo();
+ EXPECT_EQ(encoder_info.fps_allocation[0].size(),
+ static_cast<size_t>(kDefaultTemporalLayerProfile[0]));
+ EXPECT_EQ(encoder_info.fps_allocation[1].size(),
+ static_cast<size_t>(kDefaultTemporalLayerProfile[1]));
+ EXPECT_EQ(encoder_info.fps_allocation[2].size(),
+ static_cast<size_t>(kDefaultTemporalLayerProfile[2]));
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h
new file mode 100644
index 0000000000..f142ab4813
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
+#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+
+namespace webrtc {
+namespace test {
+
+class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
+ public:
+ SimulcastTestFixtureImpl(std::unique_ptr<VideoEncoderFactory> encoder_factory,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory,
+ SdpVideoFormat video_format);
+ ~SimulcastTestFixtureImpl() final;
+
+ // Implements SimulcastTestFixture.
+ void TestKeyFrameRequestsOnAllStreams() override;
+ void TestKeyFrameRequestsOnSpecificStreams() override;
+ void TestPaddingAllStreams() override;
+ void TestPaddingTwoStreams() override;
+ void TestPaddingTwoStreamsOneMaxedOut() override;
+ void TestPaddingOneStream() override;
+ void TestPaddingOneStreamTwoMaxedOut() override;
+ void TestSendAllStreams() override;
+ void TestDisablingStreams() override;
+ void TestActiveStreams() override;
+ void TestSwitchingToOneStream() override;
+ void TestSwitchingToOneOddStream() override;
+ void TestSwitchingToOneSmallStream() override;
+ void TestSpatioTemporalLayers333PatternEncoder() override;
+ void TestSpatioTemporalLayers321PatternEncoder() override;
+ void TestStrideEncodeDecode() override;
+ void TestDecodeWidthHeightSet() override;
+ void TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() override;
+
+ static void DefaultSettings(VideoCodec* settings,
+ const int* temporal_layer_profile,
+ VideoCodecType codec_type,
+ bool reverse_layer_order = false);
+
+ private:
+ class TestEncodedImageCallback;
+ class TestDecodedImageCallback;
+
+ void SetUpCodec(const int* temporal_layer_profile);
+ void SetUpRateAllocator();
+ void SetRates(uint32_t bitrate_kbps, uint32_t fps);
+ void RunActiveStreamsTest(std::vector<bool> active_streams);
+ void UpdateActiveStreams(std::vector<bool> active_streams);
+ void ExpectStream(VideoFrameType frame_type, int scaleResolutionDownBy);
+ void ExpectStreams(VideoFrameType frame_type,
+ std::vector<bool> expected_streams_active);
+ void ExpectStreams(VideoFrameType frame_type, int expected_video_streams);
+ void VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ TestEncodedImageCallback* encoder_callback,
+ const int* expected_temporal_idx,
+ const bool* expected_layer_sync,
+ int num_spatial_layers);
+ void SwitchingToOneStream(int width, int height);
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ MockEncodedImageCallback encoder_callback_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ MockDecodedImageCallback decoder_callback_;
+ VideoCodec settings_;
+ rtc::scoped_refptr<I420Buffer> input_buffer_;
+ std::unique_ptr<VideoFrame> input_frame_;
+ std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
+ VideoCodecType codec_type_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_TEST_FIXTURE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc
new file mode 100644
index 0000000000..65061ed32a
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/simulcast_utility.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+uint32_t SimulcastUtility::SumStreamMaxBitrate(int streams,
+ const VideoCodec& codec) {
+ uint32_t bitrate_sum = 0;
+ for (int i = 0; i < streams; ++i) {
+ bitrate_sum += codec.simulcastStream[i].maxBitrate;
+ }
+ return bitrate_sum;
+}
+
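+// Returns the configured number of simulcast streams, clamped to at least
+// one. Falls back to a single stream when the configured streams carry a
+// zero aggregate max bitrate.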
+int SimulcastUtility::NumberOfSimulcastStreams(const VideoCodec& codec) {
+ int streams =
+ codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
+ uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
+ if (simulcast_max_bitrate == 0) {
+ streams = 1;
+ }
+ return streams;
+}
+
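+// Checks that the codec resolution matches the highest stream, that every
+// stream shares the codec's aspect ratio, that widths are non-decreasing,
+// and that all streams agree on max framerate and temporal layer count.
+// E.g. (illustrative), 320x180 / 640x360 / 1280x720 at a common framerate
+// passes, while mixing 4:3 and 16:9 layers fails the aspect-ratio check.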
+bool SimulcastUtility::ValidSimulcastParameters(const VideoCodec& codec,
+ int num_streams) {
+ // Check resolution.
+ if (codec.width != codec.simulcastStream[num_streams - 1].width ||
+ codec.height != codec.simulcastStream[num_streams - 1].height) {
+ return false;
+ }
+ for (int i = 0; i < num_streams; ++i) {
+ if (codec.width * codec.simulcastStream[i].height !=
+ codec.height * codec.simulcastStream[i].width) {
+ return false;
+ }
+ }
+ for (int i = 1; i < num_streams; ++i) {
+ if (codec.simulcastStream[i].width < codec.simulcastStream[i - 1].width) {
+ return false;
+ }
+ }
+
+ // Check frame-rate.
+ for (int i = 1; i < num_streams; ++i) {
+ if (fabs(codec.simulcastStream[i].maxFramerate -
+ codec.simulcastStream[i - 1].maxFramerate) > 1e-9) {
+ return false;
+ }
+ }
+
+ // Check temporal layers.
+ for (int i = 0; i < num_streams - 1; ++i) {
+ if (codec.simulcastStream[i].numberOfTemporalLayers !=
+ codec.simulcastStream[i + 1].numberOfTemporalLayers)
+ return false;
+ }
+ return true;
+}
+
+bool SimulcastUtility::IsConferenceModeScreenshare(const VideoCodec& codec) {
+ return codec.mode == VideoCodecMode::kScreensharing &&
+ codec.legacy_conference_mode;
+}
+
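+// Returns the temporal layer count for the given simulcast stream: the larger
+// of the codec-wide VP8 setting and the per-stream setting, at least one.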
+int SimulcastUtility::NumberOfTemporalLayers(const VideoCodec& codec,
+ int spatial_id) {
+ uint8_t num_temporal_layers =
+ std::max<uint8_t>(1, codec.VP8().numberOfTemporalLayers);
+ if (codec.numberOfSimulcastStreams > 0) {
+ RTC_DCHECK_LT(spatial_id, codec.numberOfSimulcastStreams);
+ num_temporal_layers =
+ std::max(num_temporal_layers,
+ codec.simulcastStream[spatial_id].numberOfTemporalLayers);
+ }
+ return num_temporal_layers;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h
new file mode 100644
index 0000000000..e25a594360
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
+#define MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
+
+#include <stdint.h>
+
+#include "api/video_codecs/video_codec.h"
+
+namespace webrtc {
+
+class SimulcastUtility {
+ public:
+ static uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec);
+ static int NumberOfSimulcastStreams(const VideoCodec& codec);
+ static bool ValidSimulcastParameters(const VideoCodec& codec,
+ int num_streams);
+ static int NumberOfTemporalLayers(const VideoCodec& codec, int spatial_id);
+ // TODO(sprang): Remove this hack when ScreenshareLayers is gone.
+ static bool IsConferenceModeScreenshare(const VideoCodec& codec);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_SIMULCAST_UTILITY_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp8_constants.h b/third_party/libwebrtc/modules/video_coding/utility/vp8_constants.h
new file mode 100644
index 0000000000..9321864dbc
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp8_constants.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP8_CONSTANTS_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP8_CONSTANTS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+namespace webrtc {
+
+// QP level below which VP8 variable-framerate and zero-hertz screencast
+// reduce the framerate, due to diminishing quality-enhancement returns.
+constexpr int kVp8SteadyStateQpThreshold = 15;
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP8_CONSTANTS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc
new file mode 100644
index 0000000000..80026f9a0f
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/utility/vp8_header_parser.h"
+
+#include "rtc_base/logging.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+namespace vp8 {
+namespace {
+const size_t kCommonPayloadHeaderLength = 3;
+const size_t kKeyPayloadHeaderLength = 10;
+const int kMbFeatureTreeProbs = 3;
+const int kNumMbSegments = 4;
+const int kNumRefLfDeltas = 4;
+const int kNumModeLfDeltas = 4;
+
+} // namespace
+
+// Bitstream parser according to
+// https://tools.ietf.org/html/rfc6386#section-7.3
+void VP8InitBitReader(VP8BitReader* const br,
+ const uint8_t* start,
+ const uint8_t* end) {
+ br->range_ = 255;
+ br->buf_ = start;
+ br->buf_end_ = end;
+ br->value_ = 0;
+ br->bits_ = 0;
+
+ // Read 2 bytes.
+ int i = 0;
+ while (++i <= 2) {
+ if (br->buf_ != br->buf_end_) {
+ br->value_ = br->value_ << 8 | *br->buf_++;
+ } else {
+ br->value_ = br->value_ << 8;
+ }
+ }
+}
+
+// Bit decoder according to https://tools.ietf.org/html/rfc6386#section-7.3
+// Reads one bit from the bitstream, given that it has probability prob/256 to
+// be 1.
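+// E.g., with range_ = 255 and prob = 128, split = 128 and split_hi = 0x8000,
+// so the decoded bit is 1 iff the current value_ is at least 0x8000.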
+int Vp8BitReaderGetBool(VP8BitReader* br, int prob) {
+ uint32_t split = 1 + (((br->range_ - 1) * prob) >> 8);
+ uint32_t split_hi = split << 8;
+ int retval = 0;
+ if (br->value_ >= split_hi) {
+ retval = 1;
+ br->range_ -= split;
+ br->value_ -= split_hi;
+ } else {
+ retval = 0;
+ br->range_ = split;
+ }
+
+ while (br->range_ < 128) {
+ br->value_ <<= 1;
+ br->range_ <<= 1;
+ if (++br->bits_ == 8) {
+ br->bits_ = 0;
+ if (br->buf_ != br->buf_end_) {
+ br->value_ |= *br->buf_++;
+ }
+ }
+ }
+ return retval;
+}
+
+uint32_t VP8GetValue(VP8BitReader* br, int num_bits) {
+ uint32_t v = 0;
+ while (num_bits--) {
+ // According to https://tools.ietf.org/html/rfc6386
+ // Probability 128/256 is used to encode header fields.
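+    // That is, each literal bit is read as an unbiased (50/50) boolean.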
+ v = (v << 1) | Vp8BitReaderGetBool(br, 128);
+ }
+ return v;
+}
+
+// Not a read_signed_literal() from RFC 6386!
+// This one is used to read e.g. quantizer_update, which is written as:
+// L(num_bits), sign-bit.
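+// E.g., a quantizer_update of -9 is coded as the 7-bit magnitude 0b0001001
+// followed by sign bit 1.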
+int32_t VP8GetSignedValue(VP8BitReader* br, int num_bits) {
+ int v = VP8GetValue(br, num_bits);
+ int sign = VP8GetValue(br, 1);
+ return sign ? -v : v;
+}
+
+static void ParseSegmentHeader(VP8BitReader* br) {
+ int use_segment = VP8GetValue(br, 1);
+ if (use_segment) {
+ int update_map = VP8GetValue(br, 1);
+ if (VP8GetValue(br, 1)) { // update_segment_feature_data.
+ VP8GetValue(br, 1); // segment_feature_mode.
+ int s;
+ for (s = 0; s < kNumMbSegments; ++s) {
+ bool quantizer_update = VP8GetValue(br, 1);
+ if (quantizer_update) {
+ VP8GetSignedValue(br, 7);
+ }
+ }
+ for (s = 0; s < kNumMbSegments; ++s) {
+ bool loop_filter_update = VP8GetValue(br, 1);
+ if (loop_filter_update) {
+ VP8GetSignedValue(br, 6);
+ }
+ }
+ }
+ if (update_map) {
+ int s;
+ for (s = 0; s < kMbFeatureTreeProbs; ++s) {
+ bool segment_prob_update = VP8GetValue(br, 1);
+ if (segment_prob_update) {
+ VP8GetValue(br, 8);
+ }
+ }
+ }
+ }
+}
+
+static void ParseFilterHeader(VP8BitReader* br) {
+ VP8GetValue(br, 1); // filter_type.
+ VP8GetValue(br, 6); // loop_filter_level.
+ VP8GetValue(br, 3); // sharpness_level.
+
+ // mb_lf_adjustments.
+ int loop_filter_adj_enable = VP8GetValue(br, 1);
+ if (loop_filter_adj_enable) {
+ int mode_ref_lf_delta_update = VP8GetValue(br, 1);
+ if (mode_ref_lf_delta_update) {
+ int i;
+ for (i = 0; i < kNumRefLfDeltas; ++i) {
+ int ref_frame_delta_update_flag = VP8GetValue(br, 1);
+ if (ref_frame_delta_update_flag) {
+ VP8GetSignedValue(br, 6); // delta_magnitude.
+ }
+ }
+ for (i = 0; i < kNumModeLfDeltas; ++i) {
+ int mb_mode_delta_update_flag = VP8GetValue(br, 1);
+ if (mb_mode_delta_update_flag) {
+ VP8GetSignedValue(br, 6); // delta_magnitude.
+ }
+ }
+ }
+ }
+}
+
+bool GetQp(const uint8_t* buf, size_t length, int* qp) {
+ if (length < kCommonPayloadHeaderLength) {
+ RTC_LOG(LS_WARNING) << "Failed to get QP, invalid length.";
+ return false;
+ }
+ VP8BitReader br;
+ const uint32_t bits = buf[0] | (buf[1] << 8) | (buf[2] << 16);
+ int key_frame = !(bits & 1);
+ // Size of first partition in bytes.
+ uint32_t partition_length = (bits >> 5);
+ size_t header_length = kCommonPayloadHeaderLength;
+ if (key_frame) {
+ header_length = kKeyPayloadHeaderLength;
+ }
+ if (header_length + partition_length > length) {
+ RTC_LOG(LS_WARNING) << "Failed to get QP, invalid length: " << length;
+ return false;
+ }
+ buf += header_length;
+
+ VP8InitBitReader(&br, buf, buf + partition_length);
+ if (key_frame) {
+ // Color space and pixel type.
+ VP8GetValue(&br, 1);
+ VP8GetValue(&br, 1);
+ }
+ ParseSegmentHeader(&br);
+ ParseFilterHeader(&br);
+ // Parse log2_nbr_of_dct_partitions value.
+ VP8GetValue(&br, 2);
+ // Base QP.
+ const int base_q0 = VP8GetValue(&br, 7);
+ if (br.buf_ == br.buf_end_) {
+ RTC_LOG(LS_WARNING) << "Failed to get QP, bitstream is truncated or"
+ " corrupted.";
+ return false;
+ }
+ *qp = base_q0;
+ return true;
+}
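+
+// Example (hypothetical caller), given `payload` holding a full VP8 frame:
+//   int qp = 0;
+//   if (webrtc::vp8::GetQp(payload.data(), payload.size(), &qp)) {
+//     // qp is now in [0, 127].
+//   }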
+
+} // namespace vp8
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h
new file mode 100644
index 0000000000..dbad999dc8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+namespace webrtc {
+
+namespace vp8 {
+
+typedef struct VP8BitReader VP8BitReader;
+struct VP8BitReader {
+ // Boolean decoder.
+ uint32_t value_; // Current value (2 bytes).
+ uint32_t range_; // Current range (always in [128..255] interval).
+ int bits_; // Number of bits shifted out of value, at most 7.
+ // Read buffer.
+ const uint8_t* buf_; // Next byte to be read.
+ const uint8_t* buf_end_; // End of read buffer.
+};
+
+// Gets the QP, QP range: [0, 127].
+// Returns true on success, false otherwise.
+bool GetQp(const uint8_t* buf, size_t length, int* qp);
+
+} // namespace vp8
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h b/third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h
new file mode 100644
index 0000000000..af2c701b82
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_constants.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP9_CONSTANTS_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP9_CONSTANTS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+namespace webrtc {
+
+// Number of frames that can be stored for future reference.
+constexpr size_t kVp9NumRefFrames = 8;
+// Number of frame contexts that can be stored for future reference.
+constexpr size_t kVp9NumFrameContexts = 4;
+// Each inter frame can use up to 3 frames for reference.
+constexpr size_t kVp9RefsPerFrame = 3;
+// Number of values that can be decoded for mv_fr.
+constexpr size_t kVp9MvFrSize = 4;
+// Number of positions to search in motion vector prediction.
+constexpr size_t kVp9MvrefNeighbours = 8;
+// Number of contexts when decoding intra_mode.
+constexpr size_t kVp9BlockSizeGroups = 4;
+// Number of different block sizes used.
+constexpr size_t kVp9BlockSizes = 13;
+// Sentinel value to mark partition choices that are illegal.
+constexpr size_t kVp9BlockInvalid = 14;
+// Number of contexts when decoding partition.
+constexpr size_t kVp9PartitionContexts = 16;
+// Smallest size of a mode info block.
+constexpr size_t kVp9MiSize = 8;
+// Minimum width of a tile in units of superblocks (although tiles on
+// the right hand edge can be narrower).
+constexpr size_t kVp9MinTileWidth_B64 = 4;
+// Maximum width of a tile in units of superblocks.
+constexpr size_t kVp9MaxTileWidth_B64 = 64;
+// Number of motion vectors returned by find_mv_refs process.
+constexpr size_t kVp9MaxMvRefCandidates = 2;
+// Number of values that can be derived for ref_frame.
+constexpr size_t kVp9MaxRefFrames = 4;
+// Number of contexts for is_inter.
+constexpr size_t kVp9IsInterContexts = 4;
+// Number of contexts for comp_mode.
+constexpr size_t kVp9CompModeContexts = 5;
+// Number of contexts for single_ref and comp_ref.
+constexpr size_t kVp9RefContexts = 5;
+// Number of segments allowed in segmentation map.
+constexpr size_t kVp9MaxSegments = 8;
+// Index for quantizer segment feature.
+constexpr size_t kVp9SegLvlAlt_Q = 0;
+// Index for loop filter segment feature.
+constexpr size_t kVp9SegLvlAlt_L = 1;
+// Index for reference frame segment feature.
+constexpr size_t kVp9SegLvlRefFrame = 2;
+// Index for skip segment feature.
+constexpr size_t kVp9SegLvlSkip = 3;
+// Number of segment features.
+constexpr size_t kVp9SegLvlMax = 4;
+// Number of different plane types (Y or UV).
+constexpr size_t kVp9BlockTypes = 2;
+// Number of different prediction types (intra or inter).
+constexpr size_t kVp9RefTypes = 2;
+// Number of coefficient bands.
+constexpr size_t kVp9CoefBands = 6;
+// Number of contexts for decoding coefficients.
+constexpr size_t kVp9PrevCoefContexts = 6;
+// Number of coefficient probabilities that are directly transmitted.
+constexpr size_t kVp9UnconstrainedNodes = 3;
+// Number of contexts for transform size.
+constexpr size_t kVp9TxSizeContexts = 2;
+// Number of values for interp_filter.
+constexpr size_t kVp9SwitchableFilters = 3;
+// Number of contexts for interp_filter.
+constexpr size_t kVp9InterpFilterContexts = 4;
+// Number of contexts for decoding skip.
+constexpr size_t kVp9SkipContexts = 3;
+// Number of values for partition.
+constexpr size_t kVp9PartitionTypes = 4;
+// Number of values for tx_size.
+constexpr size_t kVp9TxSizes = 4;
+// Number of values for tx_mode.
+constexpr size_t kVp9TxModes = 5;
+// Inverse transform rows with DCT and columns with DCT.
+constexpr size_t kVp9DctDct = 0;
+// Inverse transform rows with DCT and columns with ADST.
+constexpr size_t kVp9AdstDct = 1;
+// Inverse transform rows with ADST and columns with DCT.
+constexpr size_t kVp9DctAdst = 2;
+// Inverse transform rows with ADST and columns with ADST.
+constexpr size_t kVp9AdstAdst = 3;
+// Number of values for y_mode.
+constexpr size_t kVp9MbModeCount = 14;
+// Number of values for intra_mode.
+constexpr size_t kVp9IntraModes = 10;
+// Number of values for inter_mode.
+constexpr size_t kVp9InterModes = 4;
+// Number of contexts for inter_mode.
+constexpr size_t kVp9InterModeContexts = 7;
+// Number of values for mv_joint.
+constexpr size_t kVp9MvJoints = 4;
+// Number of values for mv_class.
+constexpr size_t kVp9MvClasses = 11;
+// Number of values for mv_class0_bit.
+constexpr size_t kVp9Class0Size = 2;
+// Maximum number of bits for decoding motion vectors.
+constexpr size_t kVp9MvOffsetBits = 10;
+// Number of values allowed for a probability adjustment.
+constexpr size_t kVp9MaxProb = 255;
+// Number of different mode types for loop filtering.
+constexpr size_t kVp9MaxModeLfDeltas = 2;
+// Threshold at which motion vectors are considered large.
+constexpr size_t kVp9CompandedMvrefThresh = 8;
+// Maximum value used for loop filtering.
+constexpr size_t kVp9MaxLoopFilter = 63;
+// Number of bits of precision when scaling reference frames.
+constexpr size_t kVp9RefScaleShift = 14;
+// Number of bits of precision when performing inter prediction.
+constexpr size_t kVp9SubpelBits = 4;
+// 1 << kVp9SubpelBits.
+constexpr size_t kVp9SubpelShifts = 16;
+// kVp9SubpelShifts - 1.
+constexpr size_t kVp9SubpelMask = 15;
+// Value used when clipping motion vectors.
+constexpr size_t kVp9MvBorder = 128;
+// Value used when clipping motion vectors.
+constexpr size_t kVp9InterpExtend = 4;
+// Value used when clipping motion vectors.
+constexpr size_t kVp9Borderinpixels = 160;
+// Value used in adapting probabilities.
+constexpr size_t kVp9MaxUpdateFactor = 128;
+// Value used in adapting probabilities.
+constexpr size_t kVp9CountSat = 20;
+// Both candidates use ZEROMV.
+constexpr size_t kVp9BothZero = 0;
+// One candidate uses ZEROMV, one uses NEARMV or NEARESTMV.
+constexpr size_t kVp9ZeroPlusPredicted = 1;
+// Both candidates use NEARMV or NEARESTMV.
+constexpr size_t kVp9BothPredicted = 2;
+// One candidate uses NEWMV, one uses ZEROMV.
+constexpr size_t kVp9NewPlusNonIntra = 3;
+// Both candidates use NEWMV.
+constexpr size_t kVp9BothNew = 4;
+// One candidate uses intra prediction, one uses inter prediction.
+constexpr size_t kVp9IntraPlusNonIntra = 5;
+// Both candidates use intra prediction.
+constexpr size_t kVp9BothIntra = 6;
+// Sentinel value marking a case that can never occur.
+constexpr size_t kVp9InvalidCase = 9;
+
+enum class Vp9TxMode : uint8_t {
+ kOnly4X4 = 0,
+ kAllow8X8 = 1,
+ kAllow16x16 = 2,
+ kAllow32x32 = 3,
+ kTxModeSelect = 4
+};
+
+enum Vp9BlockSize : uint8_t {
+ kBlock4X4 = 0,
+ kBlock4X8 = 1,
+ kBlock8X4 = 2,
+ kBlock8X8 = 3,
+ kBlock8X16 = 4,
+ kBlock16X8 = 5,
+ kBlock16X16 = 6,
+ kBlock16X32 = 7,
+ kBlock32X16 = 8,
+ kBlock32X32 = 9,
+ kBlock32X64 = 10,
+ kBlock64X32 = 11,
+ kBlock64X64 = 12
+};
+
+enum Vp9Partition : uint8_t {
+ kPartitionNone = 0,
+ kPartitionHorizontal = 1,
+ kPartitionVertical = 2,
+ kPartitionSplit = 3
+};
+
+enum class Vp9ReferenceMode : uint8_t {
+ kSingleReference = 0,
+ kCompoundReference = 1,
+ kReferenceModeSelect = 2,
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP9_CONSTANTS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
new file mode 100644
index 0000000000..bf9d51f692
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+
+#include "absl/numeric/bits.h"
+#include "absl/strings/string_view.h"
+#include "rtc_base/bitstream_reader.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
+const size_t kVp9NumRefsPerFrame = 3;
+const size_t kVp9MaxRefLFDeltas = 4;
+const size_t kVp9MaxModeLFDeltas = 2;
+const size_t kVp9MinTileWidthB64 = 4;
+const size_t kVp9MaxTileWidthB64 = 64;
+
+void Vp9ReadColorConfig(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ if (frame_info->profile == 2 || frame_info->profile == 3) {
+ frame_info->bit_detph =
+ br.Read<bool>() ? Vp9BitDept::k12Bit : Vp9BitDept::k10Bit;
+ } else {
+ frame_info->bit_detph = Vp9BitDept::k8Bit;
+ }
+
+ frame_info->color_space = static_cast<Vp9ColorSpace>(br.ReadBits(3));
+
+ if (frame_info->color_space != Vp9ColorSpace::CS_RGB) {
+ frame_info->color_range =
+ br.Read<bool>() ? Vp9ColorRange::kFull : Vp9ColorRange::kStudio;
+
+ if (frame_info->profile == 1 || frame_info->profile == 3) {
+ static constexpr Vp9YuvSubsampling kSubSamplings[] = {
+ Vp9YuvSubsampling::k444, Vp9YuvSubsampling::k440,
+ Vp9YuvSubsampling::k422, Vp9YuvSubsampling::k420};
+ frame_info->sub_sampling = kSubSamplings[br.ReadBits(2)];
+
+ if (br.Read<bool>()) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Reserved bit set.";
+ br.Invalidate();
+ return;
+ }
+ } else {
+ // Profile 0 or 2.
+ frame_info->sub_sampling = Vp9YuvSubsampling::k420;
+ }
+ } else {
+ // SRGB
+ frame_info->color_range = Vp9ColorRange::kFull;
+ if (frame_info->profile == 1 || frame_info->profile == 3) {
+ frame_info->sub_sampling = Vp9YuvSubsampling::k444;
+ if (br.Read<bool>()) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Reserved bit set.";
+ br.Invalidate();
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. 4:4:4 color not supported"
+ " in profile 0 or 2.";
+ br.Invalidate();
+ }
+ }
+}
+
+void ReadRefreshFrameFlags(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ // Refresh frame flags.
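+  // The byte is MSB-first: e.g. flags = 0x80 marks only buffer 0 as updated.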
+ uint8_t flags = br.Read<uint8_t>();
+ for (int i = 0; i < 8; ++i) {
+ frame_info->updated_buffers.set(i, (flags & (0x01 << (7 - i))) != 0);
+ }
+}
+
+void Vp9ReadFrameSize(BitstreamReader& br, Vp9UncompressedHeader* frame_info) {
+ // 16 bits: frame (width|height) - 1.
+ frame_info->frame_width = br.Read<uint16_t>() + 1;
+ frame_info->frame_height = br.Read<uint16_t>() + 1;
+}
+
+void Vp9ReadRenderSize(size_t total_buffer_size_bits,
+ BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ // render_and_frame_size_different
+ if (br.Read<bool>()) {
+ frame_info->render_size_offset_bits =
+ total_buffer_size_bits - br.RemainingBitCount();
+ // 16 bits: render (width|height) - 1.
+ frame_info->render_width = br.Read<uint16_t>() + 1;
+ frame_info->render_height = br.Read<uint16_t>() + 1;
+ } else {
+ frame_info->render_height = frame_info->frame_height;
+ frame_info->render_width = frame_info->frame_width;
+ }
+}
+
+void Vp9ReadFrameSizeFromRefs(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ // Size in refs.
+ if (br.Read<bool>()) {
+ frame_info->infer_size_from_reference = frame_info->reference_buffers[i];
+ return;
+ }
+ }
+
+ Vp9ReadFrameSize(br, frame_info);
+}
+
+void Vp9ReadLoopfilter(BitstreamReader& br) {
+ // 6 bits: filter level.
+ // 3 bits: sharpness level.
+ br.ConsumeBits(9);
+
+ if (!br.Read<bool>()) { // mode_ref_delta_enabled
+ return;
+ }
+ if (!br.Read<bool>()) { // mode_ref_delta_update
+ return;
+ }
+
+ for (size_t i = 0; i < kVp9MaxRefLFDeltas; i++) {
+ if (br.Read<bool>()) { // update_ref_delta
+ br.ConsumeBits(7);
+ }
+ }
+ for (size_t i = 0; i < kVp9MaxModeLFDeltas; i++) {
+ if (br.Read<bool>()) { // update_mode_delta
+ br.ConsumeBits(7);
+ }
+ }
+}
+
+void Vp9ReadQp(BitstreamReader& br, Vp9UncompressedHeader* frame_info) {
+ frame_info->base_qp = br.Read<uint8_t>();
+
+ // yuv offsets
+ frame_info->is_lossless = frame_info->base_qp == 0;
+ for (int i = 0; i < 3; ++i) {
+ if (br.Read<bool>()) { // if delta_coded
+      // delta_q is a signed integer whose leading 4 bits contain the absolute
+      // value and whose last bit contains the sign. There are two ways to
+      // represent zero with such an encoding.
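+      // (Both 0b0000'0 and 0b0000'1 decode to zero.)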
+ if ((br.ReadBits(5) & 0b1111'0) != 0) { // delta_q
+ frame_info->is_lossless = false;
+ }
+ }
+ }
+}
+
+void Vp9ReadSegmentationParams(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info) {
+ constexpr int kSegmentationFeatureBits[kVp9SegLvlMax] = {8, 6, 2, 0};
+ constexpr bool kSegmentationFeatureSigned[kVp9SegLvlMax] = {true, true, false,
+ false};
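+  // Indexed by feature: kVp9SegLvlAlt_Q (8-bit signed), kVp9SegLvlAlt_L
+  // (6-bit signed), kVp9SegLvlRefFrame (2-bit unsigned), kVp9SegLvlSkip
+  // (flag only, no value bits).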
+
+ frame_info->segmentation_enabled = br.Read<bool>();
+ if (!frame_info->segmentation_enabled) {
+ return;
+ }
+
+ if (br.Read<bool>()) { // update_map
+ frame_info->segmentation_tree_probs.emplace();
+ for (int i = 0; i < 7; ++i) {
+ if (br.Read<bool>()) {
+ (*frame_info->segmentation_tree_probs)[i] = br.Read<uint8_t>();
+ } else {
+ (*frame_info->segmentation_tree_probs)[i] = 255;
+ }
+ }
+
+ // temporal_update
+ frame_info->segmentation_pred_prob.emplace();
+ if (br.Read<bool>()) {
+ for (int i = 0; i < 3; ++i) {
+ if (br.Read<bool>()) {
+ (*frame_info->segmentation_pred_prob)[i] = br.Read<uint8_t>();
+ } else {
+ (*frame_info->segmentation_pred_prob)[i] = 255;
+ }
+ }
+ } else {
+ frame_info->segmentation_pred_prob->fill(255);
+ }
+ }
+
+ if (br.Read<bool>()) { // segmentation_update_data
+ frame_info->segmentation_is_delta = br.Read<bool>();
+ for (size_t i = 0; i < kVp9MaxSegments; ++i) {
+ for (size_t j = 0; j < kVp9SegLvlMax; ++j) {
+ if (!br.Read<bool>()) { // feature_enabled
+ continue;
+ }
+ if (kSegmentationFeatureBits[j] == 0) {
+          // No feature bits used and no sign, just mark it and continue.
+ frame_info->segmentation_features[i][j] = 1;
+ continue;
+ }
+ frame_info->segmentation_features[i][j] =
+ br.ReadBits(kSegmentationFeatureBits[j]);
+ if (kSegmentationFeatureSigned[j] && br.Read<bool>()) {
+ (*frame_info->segmentation_features[i][j]) *= -1;
+ }
+ }
+ }
+ }
+}
+
+void Vp9ReadTileInfo(BitstreamReader& br, Vp9UncompressedHeader* frame_info) {
+ size_t mi_cols = (frame_info->frame_width + 7) >> 3;
+ size_t sb64_cols = (mi_cols + 7) >> 3;
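+  // E.g., a 1280-pixel-wide frame gives mi_cols = 160 and sb64_cols = 20,
+  // permitting tile_cols_log2 in [0, 2].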
+
+ size_t min_log2 = 0;
+ while ((kVp9MaxTileWidthB64 << min_log2) < sb64_cols) {
+ ++min_log2;
+ }
+
+ size_t max_log2 = 1;
+ while ((sb64_cols >> max_log2) >= kVp9MinTileWidthB64) {
+ ++max_log2;
+ }
+ --max_log2;
+
+ frame_info->tile_cols_log2 = min_log2;
+ while (frame_info->tile_cols_log2 < max_log2) {
+ if (br.Read<bool>()) {
+ ++frame_info->tile_cols_log2;
+ } else {
+ break;
+ }
+ }
+ frame_info->tile_rows_log2 = 0;
+ if (br.Read<bool>()) {
+ ++frame_info->tile_rows_log2;
+ if (br.Read<bool>()) {
+ ++frame_info->tile_rows_log2;
+ }
+ }
+}
+
+const Vp9InterpolationFilter kLiteralToType[4] = {
+ Vp9InterpolationFilter::kEightTapSmooth, Vp9InterpolationFilter::kEightTap,
+ Vp9InterpolationFilter::kEightTapSharp, Vp9InterpolationFilter::kBilinear};
+} // namespace
+
+std::string Vp9UncompressedHeader::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder oss(buf);
+
+ oss << "Vp9UncompressedHeader { "
+ << "profile = " << profile;
+
+ if (show_existing_frame) {
+ oss << ", show_existing_frame = " << *show_existing_frame << " }";
+ return oss.str();
+ }
+
+ oss << ", frame type = " << (is_keyframe ? "key" : "delta")
+ << ", show_frame = " << (show_frame ? "true" : "false")
+ << ", error_resilient = " << (error_resilient ? "true" : "false");
+
+ oss << ", bit_depth = ";
+ switch (bit_detph) {
+ case Vp9BitDept::k8Bit:
+ oss << "8bit";
+ break;
+ case Vp9BitDept::k10Bit:
+ oss << "10bit";
+ break;
+ case Vp9BitDept::k12Bit:
+ oss << "12bit";
+ break;
+ }
+
+ if (color_space) {
+ oss << ", color_space = ";
+ switch (*color_space) {
+ case Vp9ColorSpace::CS_UNKNOWN:
+ oss << "unknown";
+ break;
+ case Vp9ColorSpace::CS_BT_601:
+ oss << "CS_BT_601 Rec. ITU-R BT.601-7";
+ break;
+ case Vp9ColorSpace::CS_BT_709:
+ oss << "Rec. ITU-R BT.709-6";
+ break;
+ case Vp9ColorSpace::CS_SMPTE_170:
+ oss << "SMPTE-170";
+ break;
+ case Vp9ColorSpace::CS_SMPTE_240:
+ oss << "SMPTE-240";
+ break;
+ case Vp9ColorSpace::CS_BT_2020:
+ oss << "Rec. ITU-R BT.2020-2";
+ break;
+ case Vp9ColorSpace::CS_RESERVED:
+ oss << "Reserved";
+ break;
+ case Vp9ColorSpace::CS_RGB:
+ oss << "sRGB (IEC 61966-2-1)";
+ break;
+ }
+ }
+
+ if (color_range) {
+ oss << ", color_range = ";
+ switch (*color_range) {
+ case Vp9ColorRange::kFull:
+ oss << "full";
+ break;
+ case Vp9ColorRange::kStudio:
+ oss << "studio";
+ break;
+ }
+ }
+
+ if (sub_sampling) {
+ oss << ", sub_sampling = ";
+ switch (*sub_sampling) {
+ case Vp9YuvSubsampling::k444:
+ oss << "444";
+ break;
+ case Vp9YuvSubsampling::k440:
+ oss << "440";
+ break;
+ case Vp9YuvSubsampling::k422:
+ oss << "422";
+ break;
+ case Vp9YuvSubsampling::k420:
+ oss << "420";
+ break;
+ }
+ }
+
+ if (infer_size_from_reference) {
+ oss << ", infer_frame_resolution_from = " << *infer_size_from_reference;
+ } else {
+ oss << ", frame_width = " << frame_width
+ << ", frame_height = " << frame_height;
+ }
+ if (render_width != 0 && render_height != 0) {
+ oss << ", render_width = " << render_width
+ << ", render_height = " << render_height;
+ }
+
+ oss << ", base qp = " << base_qp;
+ if (reference_buffers[0] != -1) {
+ oss << ", last_buffer = " << reference_buffers[0];
+ }
+ if (reference_buffers[1] != -1) {
+ oss << ", golden_buffer = " << reference_buffers[1];
+ }
+ if (reference_buffers[2] != -1) {
+ oss << ", altref_buffer = " << reference_buffers[2];
+ }
+
+ oss << ", updated buffers = { ";
+ bool first = true;
+ for (int i = 0; i < 8; ++i) {
+ if (updated_buffers.test(i)) {
+ if (first) {
+ first = false;
+ } else {
+ oss << ", ";
+ }
+ oss << i;
+ }
+ }
+ oss << " }";
+
+ oss << ", compressed_header_size_bytes = " << compressed_header_size;
+
+ oss << " }";
+ return oss.str();
+}
+
+void Parse(BitstreamReader& br,
+ Vp9UncompressedHeader* frame_info,
+ bool qp_only) {
+ const size_t total_buffer_size_bits = br.RemainingBitCount();
+
+ // Frame marker.
+ if (br.ReadBits(2) != 0b10) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Frame marker should be 2.";
+ br.Invalidate();
+ return;
+ }
+
+ // Profile has low bit first.
+ frame_info->profile = br.ReadBit();
+ frame_info->profile |= br.ReadBit() << 1;
+ if (frame_info->profile > 2 && br.Read<bool>()) {
+ RTC_LOG(LS_WARNING)
+ << "Failed to parse header. Unsupported bitstream profile.";
+ br.Invalidate();
+ return;
+ }
+
+ // Show existing frame.
+ if (br.Read<bool>()) {
+ frame_info->show_existing_frame = br.ReadBits(3);
+ return;
+ }
+
+ // Frame type: KEY_FRAME(0), INTER_FRAME(1).
+ frame_info->is_keyframe = !br.Read<bool>();
+ frame_info->show_frame = br.Read<bool>();
+ frame_info->error_resilient = br.Read<bool>();
+
+ if (frame_info->is_keyframe) {
+ if (br.ReadBits(24) != 0x498342) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Invalid sync code.";
+ br.Invalidate();
+ return;
+ }
+
+ Vp9ReadColorConfig(br, frame_info);
+ Vp9ReadFrameSize(br, frame_info);
+ Vp9ReadRenderSize(total_buffer_size_bits, br, frame_info);
+
+ // Key-frames implicitly update all buffers.
+ frame_info->updated_buffers.set();
+ } else {
+ // Non-keyframe.
+ bool is_intra_only = false;
+ if (!frame_info->show_frame) {
+ is_intra_only = br.Read<bool>();
+ }
+ if (!frame_info->error_resilient) {
+ br.ConsumeBits(2); // Reset frame context.
+ }
+
+ if (is_intra_only) {
+ if (br.ReadBits(24) != 0x498342) {
+ RTC_LOG(LS_WARNING) << "Failed to parse header. Invalid sync code.";
+ br.Invalidate();
+ return;
+ }
+
+ if (frame_info->profile > 0) {
+ Vp9ReadColorConfig(br, frame_info);
+ } else {
+ frame_info->color_space = Vp9ColorSpace::CS_BT_601;
+ frame_info->sub_sampling = Vp9YuvSubsampling::k420;
+ frame_info->bit_detph = Vp9BitDept::k8Bit;
+ }
+ frame_info->reference_buffers.fill(-1);
+ ReadRefreshFrameFlags(br, frame_info);
+ Vp9ReadFrameSize(br, frame_info);
+ Vp9ReadRenderSize(total_buffer_size_bits, br, frame_info);
+ } else {
+ ReadRefreshFrameFlags(br, frame_info);
+
+ frame_info->reference_buffers_sign_bias[0] = false;
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ frame_info->reference_buffers[i] = br.ReadBits(3);
+ frame_info->reference_buffers_sign_bias[Vp9ReferenceFrame::kLast + i] =
+ br.Read<bool>();
+ }
+
+ Vp9ReadFrameSizeFromRefs(br, frame_info);
+ Vp9ReadRenderSize(total_buffer_size_bits, br, frame_info);
+
+ frame_info->allow_high_precision_mv = br.Read<bool>();
+
+ // Interpolation filter.
+ if (br.Read<bool>()) {
+ frame_info->interpolation_filter = Vp9InterpolationFilter::kSwitchable;
+ } else {
+ frame_info->interpolation_filter = kLiteralToType[br.ReadBits(2)];
+ }
+ }
+ }
+
+ if (!frame_info->error_resilient) {
+ // 1 bit: Refresh frame context.
+ // 1 bit: Frame parallel decoding mode.
+ br.ConsumeBits(2);
+ }
+
+ // Frame context index.
+ frame_info->frame_context_idx = br.ReadBits(2);
+
+ Vp9ReadLoopfilter(br);
+
+ // Read base QP.
+ Vp9ReadQp(br, frame_info);
+
+ if (qp_only) {
+ // Not interested in the rest of the header, return early.
+ return;
+ }
+
+ Vp9ReadSegmentationParams(br, frame_info);
+ Vp9ReadTileInfo(br, frame_info);
+ frame_info->compressed_header_size = br.Read<uint16_t>();
+ frame_info->uncompressed_header_size =
+ (total_buffer_size_bits / 8) - (br.RemainingBitCount() / 8);
+}
+
+absl::optional<Vp9UncompressedHeader> ParseUncompressedVp9Header(
+ rtc::ArrayView<const uint8_t> buf) {
+ BitstreamReader reader(buf);
+ Vp9UncompressedHeader frame_info;
+ Parse(reader, &frame_info, /*qp_only=*/false);
+ if (reader.Ok() && frame_info.frame_width > 0) {
+ return frame_info;
+ }
+ return absl::nullopt;
+}
+
+namespace vp9 {
+
+bool GetQp(const uint8_t* buf, size_t length, int* qp) {
+ BitstreamReader reader(rtc::MakeArrayView(buf, length));
+ Vp9UncompressedHeader frame_info;
+ Parse(reader, &frame_info, /*qp_only=*/true);
+ if (!reader.Ok()) {
+ return false;
+ }
+ *qp = frame_info.base_qp;
+ return true;
+}
+
+} // namespace vp9
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h
new file mode 100644
index 0000000000..8d1b88c3d3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_UTILITY_VP9_UNCOMPRESSED_HEADER_PARSER_H_
+#define MODULES_VIDEO_CODING_UTILITY_VP9_UNCOMPRESSED_HEADER_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <bitset>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "modules/video_coding/utility/vp9_constants.h"
+
+namespace webrtc {
+
+namespace vp9 {
+
+// Gets the QP, QP range: [0, 255].
+// Returns true on success, false otherwise.
+bool GetQp(const uint8_t* buf, size_t length, int* qp);
+
+} // namespace vp9
+
+// Bit depth per channel. Support varies by profile.
+enum class Vp9BitDept : uint8_t {
+ k8Bit = 8,
+ k10Bit = 10,
+ k12Bit = 12,
+};
+
+enum class Vp9ColorSpace : uint8_t {
+ CS_UNKNOWN = 0, // Unknown (in this case the color space must be signaled
+ // outside the VP9 bitstream).
+ CS_BT_601 = 1, // CS_BT_601 Rec. ITU-R BT.601-7
+ CS_BT_709 = 2, // Rec. ITU-R BT.709-6
+ CS_SMPTE_170 = 3, // SMPTE-170
+ CS_SMPTE_240 = 4, // SMPTE-240
+ CS_BT_2020 = 5, // Rec. ITU-R BT.2020-2
+ CS_RESERVED = 6, // Reserved
+ CS_RGB = 7, // sRGB (IEC 61966-2-1)
+};
+
+enum class Vp9ColorRange {
+ kStudio, // Studio swing:
+ // For BitDepth equals 8:
+ // Y is between 16 and 235 inclusive.
+ // U and V are between 16 and 240 inclusive.
+ // For BitDepth equals 10:
+ // Y is between 64 and 940 inclusive.
+ // U and V are between 64 and 960 inclusive.
+ // For BitDepth equals 12:
+ // Y is between 256 and 3760.
+ // U and V are between 256 and 3840 inclusive.
+ kFull // Full swing; no restriction on Y, U, V values.
+};
+
+enum class Vp9YuvSubsampling {
+ k444,
+ k440,
+ k422,
+ k420,
+};
+
+enum Vp9ReferenceFrame : int {
+ kNone = -1,
+ kIntra = 0,
+ kLast = 1,
+ kGolden = 2,
+ kAltref = 3,
+};
+
+enum class Vp9InterpolationFilter : uint8_t {
+ kEightTap = 0,
+ kEightTapSmooth = 1,
+ kEightTapSharp = 2,
+ kBilinear = 3,
+ kSwitchable = 4
+};
+
+struct Vp9UncompressedHeader {
+ int profile = 0; // Profiles 0-3 are valid.
+ absl::optional<uint8_t> show_existing_frame;
+ bool is_keyframe = false;
+ bool show_frame = false;
+ bool error_resilient = false;
+ Vp9BitDept bit_detph = Vp9BitDept::k8Bit;
+ absl::optional<Vp9ColorSpace> color_space;
+ absl::optional<Vp9ColorRange> color_range;
+ absl::optional<Vp9YuvSubsampling> sub_sampling;
+ int frame_width = 0;
+ int frame_height = 0;
+ int render_width = 0;
+ int render_height = 0;
+  // Log2 of the number of tile columns/rows used.
+ size_t tile_cols_log2 = 0; // tile_cols = 1 << tile_cols_log2
+ size_t tile_rows_log2 = 0; // tile_rows = 1 << tile_rows_log2
+ absl::optional<size_t> render_size_offset_bits;
+ Vp9InterpolationFilter interpolation_filter =
+ Vp9InterpolationFilter::kEightTap;
+ bool allow_high_precision_mv = false;
+ int base_qp = 0;
+ bool is_lossless = false;
+ uint8_t frame_context_idx = 0;
+
+ bool segmentation_enabled = false;
+ absl::optional<std::array<uint8_t, 7>> segmentation_tree_probs;
+ absl::optional<std::array<uint8_t, 3>> segmentation_pred_prob;
+ bool segmentation_is_delta = false;
+ std::array<std::array<absl::optional<int>, kVp9SegLvlMax>, kVp9MaxSegments>
+ segmentation_features;
+
+ // Which of the 8 reference buffers may be used as references for this frame.
+ // -1 indicates not used (e.g. {-1, -1, -1} for intra-only frames).
+ std::array<int, kVp9RefsPerFrame> reference_buffers = {-1, -1, -1};
+ // Sign bias corresponding to reference buffers, where the index is a
+ // ReferenceFrame.
+  // false/0 indicates a backwards reference, true/1 indicates a forwards
+  // reference.
+ std::bitset<kVp9MaxRefFrames> reference_buffers_sign_bias = 0;
+
+ // Indicates which reference buffer [0,7] to infer the frame size from.
+ absl::optional<int> infer_size_from_reference;
+ // Which of the 8 reference buffers are updated by this frame.
+ std::bitset<kVp9NumRefFrames> updated_buffers = 0;
+
+ // Header sizes, in bytes.
+ uint32_t uncompressed_header_size = 0;
+ uint32_t compressed_header_size = 0;
+
+ bool is_intra_only() const {
+ return reference_buffers[0] == -1 && reference_buffers[1] == -1 &&
+ reference_buffers[2] == -1;
+ }
+
+ std::string ToString() const;
+};
+
+// Parses the uncompressed header and populates (most) values in a
+// Vp9UncompressedHeader struct. Returns nullopt on failure.
+absl::optional<Vp9UncompressedHeader> ParseUncompressedVp9Header(
+ rtc::ArrayView<const uint8_t> buf);
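+//
+// Example (hypothetical caller):
+//   if (absl::optional<Vp9UncompressedHeader> hdr =
+//           ParseUncompressedVp9Header(encoded_frame)) {
+//     RTC_LOG(LS_INFO) << hdr->ToString();
+//   }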
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_UTILITY_VP9_UNCOMPRESSED_HEADER_PARSER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc
new file mode 100644
index 0000000000..d8cc738e07
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace vp9 {
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Optional;
+
+TEST(Vp9UncompressedHeaderParserTest, FrameWithSegmentation) {
+ // Uncompressed header from a frame generated with libvpx.
+ // Encoded QVGA frame (SL0 of a VGA frame) that includes a segmentation.
+ const uint8_t kHeader[] = {
+ 0x87, 0x01, 0x00, 0x00, 0x02, 0x7e, 0x01, 0xdf, 0x02, 0x7f, 0x01, 0xdf,
+ 0xc6, 0x87, 0x04, 0x83, 0x83, 0x2e, 0x46, 0x60, 0x20, 0x38, 0x0c, 0x06,
+ 0x03, 0xcd, 0x80, 0xc0, 0x60, 0x9f, 0xc5, 0x46, 0x00, 0x00, 0x00, 0x00,
+ 0x2e, 0x73, 0xb7, 0xee, 0x22, 0x06, 0x81, 0x82, 0xd4, 0xef, 0xc3, 0x58,
+ 0x1f, 0x12, 0xd2, 0x7b, 0x28, 0x1f, 0x80, 0xfc, 0x07, 0xe0, 0x00, 0x00};
+
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(kHeader);
+ ASSERT_TRUE(frame_info.has_value());
+
+ EXPECT_FALSE(frame_info->is_keyframe);
+ EXPECT_TRUE(frame_info->error_resilient);
+ EXPECT_TRUE(frame_info->show_frame);
+ EXPECT_FALSE(frame_info->show_existing_frame);
+ EXPECT_EQ(frame_info->base_qp, 185);
+ EXPECT_EQ(frame_info->frame_width, 320);
+ EXPECT_EQ(frame_info->frame_height, 240);
+ EXPECT_EQ(frame_info->render_width, 640);
+ EXPECT_EQ(frame_info->render_height, 480);
+ EXPECT_TRUE(frame_info->allow_high_precision_mv);
+ EXPECT_EQ(frame_info->frame_context_idx, 0u);
+ EXPECT_EQ(frame_info->interpolation_filter,
+ Vp9InterpolationFilter::kSwitchable);
+ EXPECT_EQ(frame_info->is_lossless, false);
+ EXPECT_EQ(frame_info->profile, 0);
+ EXPECT_THAT(frame_info->reference_buffers, ElementsAre(0, 0, 0));
+ EXPECT_THAT(frame_info->reference_buffers_sign_bias, 0b0000);
+ EXPECT_EQ(frame_info->updated_buffers, 0b10000000);
+ EXPECT_EQ(frame_info->tile_cols_log2, 0u);
+ EXPECT_EQ(frame_info->tile_rows_log2, 0u);
+ EXPECT_EQ(frame_info->render_size_offset_bits, 64u);
+ EXPECT_EQ(frame_info->compressed_header_size, 23u);
+ EXPECT_EQ(frame_info->uncompressed_header_size, 37u);
+
+ EXPECT_TRUE(frame_info->segmentation_enabled);
+ EXPECT_FALSE(frame_info->segmentation_is_delta);
+ EXPECT_THAT(frame_info->segmentation_pred_prob,
+ Optional(ElementsAre(205, 1, 1)));
+ EXPECT_THAT(frame_info->segmentation_tree_probs,
+ Optional(ElementsAre(255, 255, 128, 1, 128, 128, 128)));
+ EXPECT_THAT(frame_info->segmentation_features[1][kVp9SegLvlAlt_Q], Eq(-63));
+ EXPECT_THAT(frame_info->segmentation_features[2][kVp9SegLvlAlt_Q], Eq(-81));
+}
+
+TEST(Vp9UncompressedHeaderParserTest, SegmentationWithDefaultPredProbs) {
+ const uint8_t kHeader[] = {0x90, 0x49, 0x83, 0x42, 0x80, 0x2e,
+ 0x30, 0x0, 0xb0, 0x0, 0x37, 0xff,
+ 0x06, 0x80, 0x0, 0x0, 0x0, 0x0};
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(kHeader);
+ ASSERT_TRUE(frame_info.has_value());
+ EXPECT_THAT(frame_info->segmentation_pred_prob,
+ Optional(ElementsAre(255, 255, 255)));
+}
+
+TEST(Vp9UncompressedHeaderParserTest, SegmentationWithSkipLevel) {
+ const uint8_t kHeader[] = {0x90, 0x49, 0x83, 0x42, 0x80, 0x2e, 0x30, 0x00,
+ 0xb0, 0x00, 0x37, 0xff, 0x06, 0x80, 0x01, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ absl::optional<Vp9UncompressedHeader> frame_info =
+ ParseUncompressedVp9Header(kHeader);
+ ASSERT_TRUE(frame_info.has_value());
+ EXPECT_THAT(frame_info->segmentation_features[0][kVp9SegLvlSkip], Eq(1));
+}
+
+} // namespace vp9
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc b/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc
new file mode 100644
index 0000000000..e1885d74c8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_codec_initializer.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/units/data_rate.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/av1/av1_svc_config.h"
+#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+bool VideoCodecInitializer::SetupCodec(const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams,
+ VideoCodec* codec) {
+ if (config.codec_type == kVideoCodecMultiplex) {
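+    // Multiplex is configured by recursively setting up an associated VP9
+    // codec and then relabeling the resulting configuration.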
+ VideoEncoderConfig associated_config = config.Copy();
+ associated_config.codec_type = kVideoCodecVP9;
+ if (!SetupCodec(associated_config, streams, codec)) {
+ RTC_LOG(LS_ERROR) << "Failed to create stereo encoder configuration.";
+ return false;
+ }
+ codec->codecType = kVideoCodecMultiplex;
+ return true;
+ }
+
+ *codec = VideoEncoderConfigToVideoCodec(config, streams);
+ return true;
+}
+
+// TODO(sprang): Split this up and separate the codec-specific parts.
+VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
+ const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams) {
+ static const int kEncoderMinBitrateKbps = 30;
+ RTC_DCHECK(!streams.empty());
+ RTC_DCHECK_GE(config.min_transmit_bitrate_bps, 0);
+
+ VideoCodec video_codec;
+ video_codec.codecType = config.codec_type;
+
+ switch (config.content_type) {
+ case VideoEncoderConfig::ContentType::kRealtimeVideo:
+ video_codec.mode = VideoCodecMode::kRealtimeVideo;
+ break;
+ case VideoEncoderConfig::ContentType::kScreen:
+ video_codec.mode = VideoCodecMode::kScreensharing;
+ break;
+ }
+
+ video_codec.legacy_conference_mode =
+ config.content_type == VideoEncoderConfig::ContentType::kScreen &&
+ config.legacy_conference_mode;
+
+ video_codec.SetFrameDropEnabled(config.frame_drop_enabled);
+ video_codec.numberOfSimulcastStreams =
+ static_cast<unsigned char>(streams.size());
+ video_codec.minBitrate = streams[0].min_bitrate_bps / 1000;
+ bool codec_active = false;
+ // Active configuration might not be fully copied to `streams` for SVC yet.
+ // Therefore the `config` is checked here.
+ for (const VideoStream& stream : config.simulcast_layers) {
+ if (stream.active) {
+ codec_active = true;
+ break;
+ }
+ }
+  // Set active for the entire video codec for the non-simulcast case.
+ video_codec.active = codec_active;
+ if (video_codec.minBitrate < kEncoderMinBitrateKbps)
+ video_codec.minBitrate = kEncoderMinBitrateKbps;
+ video_codec.timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
+ kDefaultOutlierFrameSizePercent};
+ RTC_DCHECK_LE(streams.size(), kMaxSimulcastStreams);
+
+ int max_framerate = 0;
+
+ absl::optional<ScalabilityMode> scalability_mode =
+ streams[0].scalability_mode;
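+  // Copy per-stream settings into the simulcast configuration and derive the
+  // codec-wide values (resolution, bitrate bounds, max QP, framerate) as
+  // aggregates over all streams.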
+ for (size_t i = 0; i < streams.size(); ++i) {
+ SimulcastStream* sim_stream = &video_codec.simulcastStream[i];
+ RTC_DCHECK_GT(streams[i].width, 0);
+ RTC_DCHECK_GT(streams[i].height, 0);
+ RTC_DCHECK_GT(streams[i].max_framerate, 0);
+ RTC_DCHECK_GE(streams[i].min_bitrate_bps, 0);
+ RTC_DCHECK_GE(streams[i].target_bitrate_bps, streams[i].min_bitrate_bps);
+ RTC_DCHECK_GE(streams[i].max_bitrate_bps, streams[i].target_bitrate_bps);
+ RTC_DCHECK_GE(streams[i].max_qp, 0);
+
+ sim_stream->width = static_cast<uint16_t>(streams[i].width);
+ sim_stream->height = static_cast<uint16_t>(streams[i].height);
+ sim_stream->maxFramerate = streams[i].max_framerate;
+ sim_stream->minBitrate = streams[i].min_bitrate_bps / 1000;
+ sim_stream->targetBitrate = streams[i].target_bitrate_bps / 1000;
+ sim_stream->maxBitrate = streams[i].max_bitrate_bps / 1000;
+ sim_stream->qpMax = streams[i].max_qp;
+
+ int num_temporal_layers =
+ streams[i].scalability_mode.has_value()
+ ? ScalabilityModeToNumTemporalLayers(*streams[i].scalability_mode)
+ : streams[i].num_temporal_layers.value_or(1);
+
+ sim_stream->numberOfTemporalLayers =
+ static_cast<unsigned char>(num_temporal_layers);
+ sim_stream->active = streams[i].active;
+
+ video_codec.width =
+ std::max(video_codec.width, static_cast<uint16_t>(streams[i].width));
+ video_codec.height =
+ std::max(video_codec.height, static_cast<uint16_t>(streams[i].height));
+ video_codec.minBitrate =
+ std::min(static_cast<uint16_t>(video_codec.minBitrate),
+ static_cast<uint16_t>(streams[i].min_bitrate_bps / 1000));
+ video_codec.maxBitrate += streams[i].max_bitrate_bps / 1000;
+ video_codec.qpMax = std::max(video_codec.qpMax,
+ static_cast<unsigned int>(streams[i].max_qp));
+ max_framerate = std::max(max_framerate, streams[i].max_framerate);
+
+ // TODO(bugs.webrtc.org/11607): Since scalability mode is a top-level
+ // setting on VideoCodec, setting it makes sense only if it is the same for
+ // all simulcast streams.
+ if (streams[0].scalability_mode != streams[i].scalability_mode) {
+ scalability_mode.reset();
+      // For VP8, the top-level scalability mode doesn't matter, since the
+      // configuration is based on each simulcast stream's temporal layers.
+ if (video_codec.codecType != kVideoCodecVP8) {
+ RTC_LOG(LS_WARNING) << "Inconsistent scalability modes configured.";
+ }
+ }
+ }
+
+ if (scalability_mode.has_value()) {
+ video_codec.SetScalabilityMode(*scalability_mode);
+ }
+
+ if (video_codec.maxBitrate == 0) {
+ // Unset max bitrate -> cap to one bit per pixel.
+ video_codec.maxBitrate =
+ (video_codec.width * video_codec.height * video_codec.maxFramerate) /
+ 1000;
+ }
+ if (video_codec.maxBitrate < kEncoderMinBitrateKbps)
+ video_codec.maxBitrate = kEncoderMinBitrateKbps;
+
+ video_codec.maxFramerate = max_framerate;
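+  // Seed the first spatial layer from the aggregated codec settings; the
+  // codec-specific branches below may overwrite this for SVC configurations.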
+ video_codec.spatialLayers[0] = {0};
+ video_codec.spatialLayers[0].width = video_codec.width;
+ video_codec.spatialLayers[0].height = video_codec.height;
+ video_codec.spatialLayers[0].maxFramerate = max_framerate;
+ video_codec.spatialLayers[0].numberOfTemporalLayers =
+ streams[0].scalability_mode.has_value()
+ ? ScalabilityModeToNumTemporalLayers(*streams[0].scalability_mode)
+ : streams[0].num_temporal_layers.value_or(1);
+
+  // Set codec-specific options.
+ if (config.encoder_specific_settings)
+ config.encoder_specific_settings->FillEncoderSpecificSettings(&video_codec);
+
+ switch (video_codec.codecType) {
+ case kVideoCodecVP8: {
+ if (!config.encoder_specific_settings) {
+ *video_codec.VP8() = VideoEncoder::GetDefaultVp8Settings();
+ }
+
+ // Validate specified scalability modes. If some layer has an unsupported
+ // mode, store it as the top-level scalability mode, which will make
+ // InitEncode fail with an appropriate error.
+ for (const auto& stream : streams) {
+ if (stream.scalability_mode.has_value() &&
+ !VP8SupportsScalabilityMode(*stream.scalability_mode)) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid scalability mode for VP8: "
+ << ScalabilityModeToString(*stream.scalability_mode);
+ video_codec.SetScalabilityMode(*stream.scalability_mode);
+ break;
+ }
+ }
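+      // The codec-level temporal layer count follows the last (highest)
+      // simulcast stream's configuration.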
+ video_codec.VP8()->numberOfTemporalLayers =
+ streams.back().scalability_mode.has_value()
+ ? ScalabilityModeToNumTemporalLayers(
+ *streams.back().scalability_mode)
+ : streams.back().num_temporal_layers.value_or(
+ video_codec.VP8()->numberOfTemporalLayers);
+
+ RTC_DCHECK_GE(video_codec.VP8()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP8()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
+ break;
+ }
+ case kVideoCodecVP9: {
+      // Force the first stream to be active whenever any layer is active.
+ video_codec.simulcastStream[0].active = codec_active;
+
+ if (!config.encoder_specific_settings) {
+ *video_codec.VP9() = VideoEncoder::GetDefaultVp9Settings();
+ }
+
+ video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
+ streams.back().num_temporal_layers.value_or(
+ video_codec.VP9()->numberOfTemporalLayers));
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
+ RTC_DCHECK(config.spatial_layers.empty() ||
+ config.spatial_layers.size() ==
+ video_codec.VP9()->numberOfSpatialLayers);
+
+ std::vector<SpatialLayer> spatial_layers;
+ if (!config.spatial_layers.empty()) {
+ // Layering is set explicitly.
+ spatial_layers = config.spatial_layers;
+ } else if (scalability_mode.has_value()) {
+ // Layering is set via scalability mode.
+ spatial_layers = GetVp9SvcConfig(video_codec);
+ if (spatial_layers.empty())
+ break;
+ } else {
+ size_t first_active_layer = 0;
+ for (size_t spatial_idx = 0;
+ spatial_idx < config.simulcast_layers.size(); ++spatial_idx) {
+ if (config.simulcast_layers[spatial_idx].active) {
+ first_active_layer = spatial_idx;
+ break;
+ }
+ }
+
+ spatial_layers = GetSvcConfig(
+ video_codec.width, video_codec.height, video_codec.maxFramerate,
+ first_active_layer, video_codec.VP9()->numberOfSpatialLayers,
+ video_codec.VP9()->numberOfTemporalLayers,
+ video_codec.mode == VideoCodecMode::kScreensharing);
+
+        // If there was no request for spatial layering, don't limit the
+        // bitrate of the single spatial layer.
+ const bool no_spatial_layering =
+ video_codec.VP9()->numberOfSpatialLayers <= 1;
+ if (no_spatial_layering) {
+ // Use codec's bitrate limits.
+ spatial_layers.back().minBitrate = video_codec.minBitrate;
+ spatial_layers.back().targetBitrate = video_codec.maxBitrate;
+ spatial_layers.back().maxBitrate = video_codec.maxBitrate;
+ }
+
+ for (size_t spatial_idx = first_active_layer;
+ spatial_idx < config.simulcast_layers.size() &&
+ spatial_idx < spatial_layers.size() + first_active_layer;
+ ++spatial_idx) {
+ spatial_layers[spatial_idx - first_active_layer].active =
+ config.simulcast_layers[spatial_idx].active;
+ }
+ }
+
+ RTC_DCHECK(!spatial_layers.empty());
+ for (size_t i = 0; i < spatial_layers.size(); ++i) {
+ video_codec.spatialLayers[i] = spatial_layers[i];
+ }
+
+      // The top spatial layer's dimensions may differ from the input
+      // resolution because of rounding or explicit configuration. This
+      // difference must be propagated to the stream configuration.
+ video_codec.width = spatial_layers.back().width;
+ video_codec.height = spatial_layers.back().height;
+ video_codec.simulcastStream[0].width = spatial_layers.back().width;
+ video_codec.simulcastStream[0].height = spatial_layers.back().height;
+
+ // Update layering settings.
+ video_codec.VP9()->numberOfSpatialLayers =
+ static_cast<unsigned char>(spatial_layers.size());
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfSpatialLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfSpatialLayers,
+ kMaxSpatialLayers);
+
+ video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
+ spatial_layers.back().numberOfTemporalLayers);
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
+ break;
+ }
+ case kVideoCodecAV1:
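+      // SetAv1SvcConfig fills in the spatial/temporal layering; per-layer
+      // activity is then copied from the explicit spatial-layer config.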
+ if (SetAv1SvcConfig(video_codec,
+ /*num_temporal_layers=*/
+ streams.back().num_temporal_layers.value_or(1),
+ /*num_spatial_layers=*/
+ std::max<int>(config.spatial_layers.size(), 1))) {
+ for (size_t i = 0; i < config.spatial_layers.size(); ++i) {
+ video_codec.spatialLayers[i].active = config.spatial_layers[i].active;
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to configure svc bitrates for av1.";
+ }
+ break;
+ case kVideoCodecH264: {
+ RTC_CHECK(!config.encoder_specific_settings);
+
+ *video_codec.H264() = VideoEncoder::GetDefaultH264Settings();
+ video_codec.H264()->numberOfTemporalLayers = static_cast<unsigned char>(
+ streams.back().num_temporal_layers.value_or(
+ video_codec.H264()->numberOfTemporalLayers));
+ RTC_DCHECK_GE(video_codec.H264()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.H264()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+ break;
+ }
+ default:
+ // TODO(pbos): Support encoder_settings codec-agnostically.
+ RTC_DCHECK(!config.encoder_specific_settings)
+ << "Encoder-specific settings for codec type not wired up.";
+ break;
+ }
+
+ const absl::optional<DataRate> experimental_min_bitrate =
+ GetExperimentalMinVideoBitrate(video_codec.codecType);
+ if (experimental_min_bitrate) {
+ const int experimental_min_bitrate_kbps =
+ rtc::saturated_cast<int>(experimental_min_bitrate->kbps());
+ video_codec.minBitrate = experimental_min_bitrate_kbps;
+ video_codec.simulcastStream[0].minBitrate = experimental_min_bitrate_kbps;
+ if (video_codec.codecType == kVideoCodecVP9) {
+ video_codec.spatialLayers[0].minBitrate = experimental_min_bitrate_kbps;
+ }
+ }
+
+ return video_codec;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
new file mode 100644
index 0000000000..0e6f2dfca2
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_codec_initializer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/test/mock_fec_controller_override.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+static const int kDefaultWidth = 1280;
+static const int kDefaultHeight = 720;
+static const int kDefaultFrameRate = 30;
+static const uint32_t kDefaultMinBitrateBps = 60000;
+static const uint32_t kDefaultTargetBitrateBps = 2000000;
+static const uint32_t kDefaultMaxBitrateBps = 2000000;
+static const uint32_t kDefaultMinTransmitBitrateBps = 400000;
+static const int kDefaultMaxQp = 48;
+static const uint32_t kScreenshareTl0BitrateBps = 120000;
+static const uint32_t kScreenshareConferenceTl0BitrateBps = 200000;
+static const uint32_t kScreenshareCodecTargetBitrateBps = 200000;
+static const uint32_t kScreenshareDefaultFramerate = 5;
+// Bitrates for the temporal layers of the higher screenshare simulcast stream.
+static const uint32_t kHighScreenshareTl0Bps = 800000;
+static const uint32_t kHighScreenshareTl1Bps = 1200000;
+} // namespace
+
+// TODO(sprang): Extend coverage to handle the rest of the codec initializer.
+class VideoCodecInitializerTest : public ::testing::Test {
+ public:
+ VideoCodecInitializerTest() {}
+ virtual ~VideoCodecInitializerTest() {}
+
+ protected:
+ void SetUpFor(VideoCodecType type,
+ int num_spatial_streams,
+ int num_temporal_streams,
+ bool screenshare) {
+ config_ = VideoEncoderConfig();
+ config_.codec_type = type;
+
+ if (screenshare) {
+ config_.min_transmit_bitrate_bps = kDefaultMinTransmitBitrateBps;
+ config_.content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ if (type == VideoCodecType::kVideoCodecVP8) {
+ config_.number_of_streams = num_spatial_streams;
+ VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
+ vp8_settings.numberOfTemporalLayers = num_temporal_streams;
+ config_.encoder_specific_settings = rtc::make_ref_counted<
+ webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
+ } else if (type == VideoCodecType::kVideoCodecVP9) {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = num_spatial_streams;
+ vp9_settings.numberOfTemporalLayers = num_temporal_streams;
+ config_.encoder_specific_settings = rtc::make_ref_counted<
+ webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ } else if (type != VideoCodecType::kVideoCodecMultiplex) {
+ ADD_FAILURE() << "Unexpected codec type: " << type;
+ }
+ }
+
+ bool InitializeCodec() {
+ codec_out_ = VideoCodec();
+ frame_buffer_controller_.reset();
+ if (!VideoCodecInitializer::SetupCodec(config_, streams_, &codec_out_)) {
+ return false;
+ }
+ bitrate_allocator_ = CreateBuiltinVideoBitrateAllocatorFactory()
+ ->CreateVideoBitrateAllocator(codec_out_);
+ RTC_CHECK(bitrate_allocator_);
+ if (codec_out_.codecType == VideoCodecType::kVideoCodecMultiplex)
+ return true;
+
+ // Make sure temporal layers instances have been created.
+ if (codec_out_.codecType == VideoCodecType::kVideoCodecVP8) {
+ Vp8TemporalLayersFactory factory;
+ const VideoEncoder::Settings settings(VideoEncoder::Capabilities(false),
+ 1, 1000);
+ frame_buffer_controller_ =
+ factory.Create(codec_out_, settings, &fec_controller_override_);
+ }
+ return true;
+ }
+
+ VideoStream DefaultStream() {
+ VideoStream stream;
+ stream.width = kDefaultWidth;
+ stream.height = kDefaultHeight;
+ stream.max_framerate = kDefaultFrameRate;
+ stream.min_bitrate_bps = kDefaultMinBitrateBps;
+ stream.target_bitrate_bps = kDefaultTargetBitrateBps;
+ stream.max_bitrate_bps = kDefaultMaxBitrateBps;
+ stream.max_qp = kDefaultMaxQp;
+ stream.num_temporal_layers = 1;
+ stream.active = true;
+ return stream;
+ }
+
+ VideoStream DefaultScreenshareStream() {
+ VideoStream stream = DefaultStream();
+ stream.min_bitrate_bps = 30000;
+ stream.target_bitrate_bps = kScreenshareCodecTargetBitrateBps;
+ stream.max_bitrate_bps = 1000000;
+ stream.max_framerate = kScreenshareDefaultFramerate;
+ stream.num_temporal_layers = 2;
+ stream.active = true;
+ return stream;
+ }
+
+ MockFecControllerOverride fec_controller_override_;
+
+ // Input settings.
+ VideoEncoderConfig config_;
+ std::vector<VideoStream> streams_;
+
+ // Output.
+ VideoCodec codec_out_;
+ std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;
+ std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_;
+};
+
+TEST_F(VideoCodecInitializerTest, SingleStreamVp8Screenshare) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 1, true);
+ streams_.push_back(DefaultStream());
+ EXPECT_TRUE(InitializeCodec());
+
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kDefaultTargetBitrateBps, kDefaultFrameRate));
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ EXPECT_EQ(kDefaultTargetBitrateBps, bitrate_allocation.get_sum_bps());
+}
+
+TEST_F(VideoCodecInitializerTest, SingleStreamVp8ScreenshareInactive) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 1, true);
+ VideoStream inactive_stream = DefaultStream();
+ inactive_stream.active = false;
+ streams_.push_back(inactive_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kDefaultTargetBitrateBps, kDefaultFrameRate));
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ EXPECT_EQ(0U, bitrate_allocation.get_sum_bps());
+}
+
+TEST_F(VideoCodecInitializerTest, TemporalLayeredVp8ScreenshareConference) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 2, true);
+ streams_.push_back(DefaultScreenshareStream());
+ EXPECT_TRUE(InitializeCodec());
+ bitrate_allocator_->SetLegacyConferenceMode(true);
+
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(2u, codec_out_.VP8()->numberOfTemporalLayers);
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kScreenshareCodecTargetBitrateBps, kScreenshareDefaultFramerate));
+ EXPECT_EQ(kScreenshareCodecTargetBitrateBps,
+ bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(kScreenshareConferenceTl0BitrateBps,
+ bitrate_allocation.GetBitrate(0, 0));
+}
+
+TEST_F(VideoCodecInitializerTest, TemporalLayeredVp8Screenshare) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 2, true);
+ streams_.push_back(DefaultScreenshareStream());
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(2u, codec_out_.VP8()->numberOfTemporalLayers);
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ kScreenshareCodecTargetBitrateBps, kScreenshareDefaultFramerate));
+ EXPECT_EQ(kScreenshareCodecTargetBitrateBps,
+ bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(kScreenshareTl0BitrateBps, bitrate_allocation.GetBitrate(0, 0));
+}
+
+TEST_F(VideoCodecInitializerTest, SimulcastVp8Screenshare) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 1, true);
+ streams_.push_back(DefaultScreenshareStream());
+ VideoStream video_stream = DefaultStream();
+ video_stream.max_framerate = kScreenshareDefaultFramerate;
+ streams_.push_back(video_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(2u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ const uint32_t max_bitrate_bps =
+ streams_[0].target_bitrate_bps + streams_[1].max_bitrate_bps;
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ max_bitrate_bps, kScreenshareDefaultFramerate));
+ EXPECT_EQ(max_bitrate_bps, bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].target_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(0));
+ EXPECT_EQ(static_cast<uint32_t>(streams_[1].max_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(1));
+}
+
+// Tests that when a video stream is inactive, the bitrate allocation for that
+// stream is 0.
+TEST_F(VideoCodecInitializerTest, SimulcastVp8ScreenshareInactive) {
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 1, true);
+ streams_.push_back(DefaultScreenshareStream());
+ VideoStream inactive_video_stream = DefaultStream();
+ inactive_video_stream.active = false;
+ inactive_video_stream.max_framerate = kScreenshareDefaultFramerate;
+ streams_.push_back(inactive_video_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(2u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(1u, codec_out_.VP8()->numberOfTemporalLayers);
+ const uint32_t target_bitrate =
+ streams_[0].target_bitrate_bps + streams_[1].target_bitrate_bps;
+ VideoBitrateAllocation bitrate_allocation =
+ bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ target_bitrate, kScreenshareDefaultFramerate));
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].max_bitrate_bps),
+ bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].max_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(0));
+ EXPECT_EQ(0U, bitrate_allocation.GetSpatialLayerSum(1));
+}
+
+TEST_F(VideoCodecInitializerTest, HighFpsSimulcastVp8Screenshare) {
+  // Two simulcast streams: the lower one uses legacy settings (two temporal
+  // layers, 5 fps), the higher one uses three temporal layers and 30 fps.
+ SetUpFor(VideoCodecType::kVideoCodecVP8, 2, 3, true);
+ streams_.push_back(DefaultScreenshareStream());
+ VideoStream video_stream = DefaultStream();
+ video_stream.num_temporal_layers = 3;
+ streams_.push_back(video_stream);
+ EXPECT_TRUE(InitializeCodec());
+
+ EXPECT_EQ(2u, codec_out_.numberOfSimulcastStreams);
+ EXPECT_EQ(3u, codec_out_.VP8()->numberOfTemporalLayers);
+ const uint32_t max_bitrate_bps =
+ streams_[0].target_bitrate_bps + streams_[1].max_bitrate_bps;
+ VideoBitrateAllocation bitrate_allocation = bitrate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(max_bitrate_bps, kDefaultFrameRate));
+ EXPECT_EQ(max_bitrate_bps, bitrate_allocation.get_sum_bps());
+ EXPECT_EQ(static_cast<uint32_t>(streams_[0].target_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(0));
+ EXPECT_EQ(static_cast<uint32_t>(streams_[1].max_bitrate_bps),
+ bitrate_allocation.GetSpatialLayerSum(1));
+ EXPECT_EQ(kHighScreenshareTl0Bps, bitrate_allocation.GetBitrate(1, 0));
+ EXPECT_EQ(kHighScreenshareTl1Bps - kHighScreenshareTl0Bps,
+ bitrate_allocation.GetBitrate(1, 1));
+}
+
+TEST_F(VideoCodecInitializerTest, SingleStreamMultiplexCodec) {
+ SetUpFor(VideoCodecType::kVideoCodecMultiplex, 1, 1, true);
+ streams_.push_back(DefaultStream());
+ EXPECT_TRUE(InitializeCodec());
+}
+
+TEST_F(VideoCodecInitializerTest, Vp9SvcDefaultLayering) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 3;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3u);
+ EXPECT_EQ(codec_out_.VP9()->numberOfTemporalLayers, 3u);
+}
+
+TEST_F(VideoCodecInitializerTest, Vp9SvcAdjustedLayering) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 3;
+  // Set a resolution that is only large enough to produce 2 spatial layers.
+ stream.width = kMinVp9SpatialLayerLongSideLength * 2;
+ stream.height = kMinVp9SpatialLayerShortSideLength * 2;
+
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2u);
+}
+
+TEST_F(VideoCodecInitializerTest,
+ Vp9SingleSpatialLayerMaxBitrateIsEqualToCodecMaxBitrate) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 1, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 3;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.spatialLayers[0].maxBitrate,
+ kDefaultMaxBitrateBps / 1000);
+}
+
+TEST_F(VideoCodecInitializerTest,
+ Vp9SingleSpatialLayerTargetBitrateIsEqualToCodecMaxBitrate) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 1, 1, true);
+ VideoStream stream = DefaultStream();
+ stream.num_temporal_layers = 1;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.spatialLayers[0].targetBitrate,
+ kDefaultMaxBitrateBps / 1000);
+}
+
+TEST_F(VideoCodecInitializerTest,
+ Vp9KeepBitrateLimitsIfNumberOfSpatialLayersIsReducedToOne) {
+  // Request 3 spatial layers for 320x180 input. The actual number of layers
+  // will be reduced to 1 due to the low input resolution, but SVC bitrate
+  // limits should still be applied.
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 3, false);
+ VideoStream stream = DefaultStream();
+ stream.width = 320;
+ stream.height = 180;
+ stream.num_temporal_layers = 3;
+ streams_.push_back(stream);
+
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_LT(codec_out_.spatialLayers[0].maxBitrate,
+ kDefaultMaxBitrateBps / 1000);
+}
+
+TEST_F(VideoCodecInitializerTest, Vp9DeactivateLayers) {
+ SetUpFor(VideoCodecType::kVideoCodecVP9, 3, 1, false);
+ VideoStream stream = DefaultStream();
+ streams_.push_back(stream);
+
+ config_.simulcast_layers.resize(3);
+
+ // Activate all layers.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[1].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[2].active);
+
+ // Deactivate top layer.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = false;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[1].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[2].active);
+
+ // Deactivate middle layer.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = false;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[1].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[2].active);
+
+ // Deactivate first layer.
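+  // Layers below the first active one are dropped from the SVC config, so
+  // only two spatial layers remain.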
+ config_.simulcast_layers[0].active = false;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_TRUE(codec_out_.spatialLayers[1].active);
+
+ // HD singlecast.
+ config_.simulcast_layers[0].active = false;
+ config_.simulcast_layers[1].active = false;
+ config_.simulcast_layers[2].active = true;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 1);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+
+ // VGA singlecast.
+ config_.simulcast_layers[0].active = false;
+ config_.simulcast_layers[1].active = true;
+ config_.simulcast_layers[2].active = false;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[1].active);
+
+ // QVGA singlecast.
+ config_.simulcast_layers[0].active = true;
+ config_.simulcast_layers[1].active = false;
+ config_.simulcast_layers[2].active = false;
+ EXPECT_TRUE(InitializeCodec());
+ EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3);
+ EXPECT_TRUE(codec_out_.spatialLayers[0].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[1].active);
+ EXPECT_FALSE(codec_out_.spatialLayers[2].active);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1SingleSpatialLayerBitratesAreConsistent) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL1T2;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_GE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].minBitrate);
+ EXPECT_LE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].maxBitrate);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersBitratesAreConsistent) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T2;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_GE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].minBitrate);
+ EXPECT_LE(codec.spatialLayers[0].targetBitrate,
+ codec.spatialLayers[0].maxBitrate);
+
+ EXPECT_GE(codec.spatialLayers[1].targetBitrate,
+ codec.spatialLayers[1].minBitrate);
+ EXPECT_LE(codec.spatialLayers[1].targetBitrate,
+ codec.spatialLayers[1].maxBitrate);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersActiveByDefault) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T2;
+ config.spatial_layers = {};
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_TRUE(codec.spatialLayers[0].active);
+ EXPECT_TRUE(codec.spatialLayers[1].active);
+}
+
+TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersOneDeactivated) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecAV1;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T2;
+ config.spatial_layers.resize(2);
+ config.spatial_layers[0].active = true;
+ config.spatial_layers[1].active = false;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_TRUE(codec.spatialLayers[0].active);
+ EXPECT_FALSE(codec.spatialLayers[1].active);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
new file mode 100644
index 0000000000..f01783ebd1
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
@@ -0,0 +1,226 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc",
+ "/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_codec_interface_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc b/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc
new file mode 100644
index 0000000000..436b1a6490
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_coding_defines.h"
+
+namespace webrtc {
+
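+// Default no-op implementations so that VCMReceiveCallback subclasses only
+// need to override the events they care about.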
+void VCMReceiveCallback::OnDroppedFrames(uint32_t frames_dropped) {}
+void VCMReceiveCallback::OnIncomingPayloadType(int payload_type) {}
+void VCMReceiveCallback::OnDecoderInfoChanged(
+ const VideoDecoder::DecoderInfo&) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build
new file mode 100644
index 0000000000..a6034ad476
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_gn/moz.build
@@ -0,0 +1,249 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/decoder_database.cc",
+ "/third_party/libwebrtc/modules/video_coding/event_wrapper.cc",
+ "/third_party/libwebrtc/modules/video_coding/fec_controller_default.cc",
+ "/third_party/libwebrtc/modules/video_coding/frame_object.cc",
+ "/third_party/libwebrtc/modules/video_coding/generic_decoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/h264_sprop_parameter_sets.cc",
+ "/third_party/libwebrtc/modules/video_coding/h264_sps_pps_tracker.cc",
+ "/third_party/libwebrtc/modules/video_coding/loss_notification_controller.cc",
+ "/third_party/libwebrtc/modules/video_coding/media_opt_util.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_frame_id_only_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_frame_reference_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_generic_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_seq_num_only_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_vp8_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/rtp_vp9_ref_finder.cc",
+ "/third_party/libwebrtc/modules/video_coding/video_codec_initializer.cc",
+ "/third_party/libwebrtc/modules/video_coding/video_receiver2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_coding_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_impl.cc b/third_party/libwebrtc/modules/video_coding/video_coding_impl.cc
new file mode 100644
index 0000000000..2eaecd5011
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_impl.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/video_coding_impl.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/encoded_image.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/memory/always_valid_pointer.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace vcm {
+
+int64_t VCMProcessTimer::Period() const {
+ return _periodMs;
+}
+
+int64_t VCMProcessTimer::TimeUntilProcess() const {
+ const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
+ const int64_t time_until_process = _periodMs - time_since_process;
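+  // Never return a negative wait; a past-due timer is reported as ready now.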
+ return std::max<int64_t>(time_until_process, 0);
+}
+
+void VCMProcessTimer::Processed() {
+ _latestMs = _clock->TimeInMilliseconds();
+}
+
+DEPRECATED_VCMDecoderDataBase::DEPRECATED_VCMDecoderDataBase() {
+ decoder_sequence_checker_.Detach();
+}
+
+VideoDecoder* DEPRECATED_VCMDecoderDataBase::DeregisterExternalDecoder(
+ uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ auto it = decoders_.find(payload_type);
+ if (it == decoders_.end()) {
+ return nullptr;
+ }
+
+ // We can't use payload_type to check if the decoder is currently in use,
+ // because payload type may be out of date (e.g. before we decode the first
+ // frame after RegisterReceiveCodec).
+ if (current_decoder_ && current_decoder_->IsSameDecoder(it->second)) {
+ // Release it if it was registered and in use.
+ current_decoder_ = absl::nullopt;
+ }
+ VideoDecoder* ret = it->second;
+ decoders_.erase(it);
+ return ret;
+}
+
+// Add the external decoder object to the list of external decoders.
+// Won't be registered as a receive codec until RegisterReceiveCodec is called.
+void DEPRECATED_VCMDecoderDataBase::RegisterExternalDecoder(
+ uint8_t payload_type,
+ VideoDecoder* external_decoder) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+  // If the payload type is already registered, erase the old decoder and
+  // insert the new one.
+ DeregisterExternalDecoder(payload_type);
+ decoders_[payload_type] = external_decoder;
+}
+
+bool DEPRECATED_VCMDecoderDataBase::IsExternalDecoderRegistered(
+ uint8_t payload_type) const {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ return payload_type == current_payload_type_ ||
+ decoders_.find(payload_type) != decoders_.end();
+}
+
+void DEPRECATED_VCMDecoderDataBase::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+  // If the payload type is already registered, erase the old settings and
+  // insert the new ones.
+ if (payload_type == current_payload_type_) {
+ current_payload_type_ = absl::nullopt;
+ }
+ decoder_settings_[payload_type] = settings;
+}
+
+bool DEPRECATED_VCMDecoderDataBase::DeregisterReceiveCodec(
+ uint8_t payload_type) {
+ if (decoder_settings_.erase(payload_type) == 0) {
+ return false;
+ }
+ if (payload_type == current_payload_type_) {
+ // This codec is currently in use.
+ current_payload_type_ = absl::nullopt;
+ }
+ return true;
+}
+
+VCMGenericDecoder* DEPRECATED_VCMDecoderDataBase::GetDecoder(
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ RTC_DCHECK(decoded_frame_callback->UserReceiveCallback());
+ uint8_t payload_type = frame.PayloadType();
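+  // Reuse the current decoder when the payload type is unchanged; a payload
+  // type of 0 is treated the same way.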
+ if (payload_type == current_payload_type_ || payload_type == 0) {
+ return current_decoder_.has_value() ? &*current_decoder_ : nullptr;
+ }
+  // If a decoder is already active, release it before switching payload types.
+ if (current_decoder_.has_value()) {
+ current_decoder_ = absl::nullopt;
+ current_payload_type_ = absl::nullopt;
+ }
+
+ CreateAndInitDecoder(frame);
+ if (current_decoder_ == absl::nullopt) {
+ return nullptr;
+ }
+
+ VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
+ callback->OnIncomingPayloadType(payload_type);
+ if (current_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
+ 0) {
+ current_decoder_ = absl::nullopt;
+ return nullptr;
+ }
+
+ current_payload_type_ = payload_type;
+ return &*current_decoder_;
+}
+
+void DEPRECATED_VCMDecoderDataBase::CreateAndInitDecoder(
+ const VCMEncodedFrame& frame) {
+ uint8_t payload_type = frame.PayloadType();
+ RTC_LOG(LS_INFO) << "Initializing decoder with payload type '"
+ << int{payload_type} << "'.";
+ auto decoder_item = decoder_settings_.find(payload_type);
+ if (decoder_item == decoder_settings_.end()) {
+ RTC_LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
+ << int{payload_type};
+ return;
+ }
+ auto external_dec_item = decoders_.find(payload_type);
+ if (external_dec_item == decoders_.end()) {
+ RTC_LOG(LS_ERROR) << "No decoder of this type exists.";
+ return;
+ }
+ current_decoder_.emplace(external_dec_item->second);
+
+ // Copy over input resolutions to prevent codec reinitialization due to
+ // the first frame being of a different resolution than the database values.
+  // This is best effort, since there's no guarantee that width/height have
+  // been parsed yet (they may be zero).
+ RenderResolution frame_resolution(frame.EncodedImage()._encodedWidth,
+ frame.EncodedImage()._encodedHeight);
+ if (frame_resolution.Valid()) {
+ decoder_item->second.set_max_render_resolution(frame_resolution);
+ }
+ if (!current_decoder_->Configure(decoder_item->second)) {
+ current_decoder_ = absl::nullopt;
+ RTC_LOG(LS_ERROR) << "Failed to initialize decoder.";
+ }
+}
+
+} // namespace vcm
+
+namespace {
+
+class VideoCodingModuleImpl : public VideoCodingModule {
+ public:
+ explicit VideoCodingModuleImpl(Clock* clock,
+ const FieldTrialsView* field_trials)
+ : VideoCodingModule(),
+ field_trials_(field_trials),
+ timing_(new VCMTiming(clock, *field_trials_)),
+ receiver_(clock, timing_.get(), *field_trials_) {}
+
+ ~VideoCodingModuleImpl() override = default;
+
+ void Process() override { receiver_.Process(); }
+
+ void RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& decoder_settings) override {
+ receiver_.RegisterReceiveCodec(payload_type, decoder_settings);
+ }
+
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) override {
+ receiver_.RegisterExternalDecoder(externalDecoder, payloadType);
+ }
+
+ int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) override {
+ RTC_DCHECK(construction_thread_.IsCurrent());
+ return receiver_.RegisterReceiveCallback(receiveCallback);
+ }
+
+ int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) override {
+ return receiver_.RegisterFrameTypeCallback(frameTypeCallback);
+ }
+
+ int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) override {
+ RTC_DCHECK(construction_thread_.IsCurrent());
+ return receiver_.RegisterPacketRequestCallback(callback);
+ }
+
+ int32_t Decode(uint16_t maxWaitTimeMs) override {
+ return receiver_.Decode(maxWaitTimeMs);
+ }
+
+ int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header) override {
+ return receiver_.IncomingPacket(incomingPayload, payloadLength, rtp_header,
+ video_header);
+ }
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) override {
+ return receiver_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
+ }
+
+ private:
+ AlwaysValidPointer<const FieldTrialsView, FieldTrialBasedConfig>
+ field_trials_;
+ SequenceChecker construction_thread_;
+ const std::unique_ptr<VCMTiming> timing_;
+ vcm::VideoReceiver receiver_;
+};
+} // namespace
+
+// DEPRECATED. Create method for the current interface; it will be removed
+// when the new jitter buffer is in place.
+VideoCodingModule* VideoCodingModule::Create(
+ Clock* clock,
+ const FieldTrialsView* field_trials) {
+ RTC_DCHECK(clock);
+ return new VideoCodingModuleImpl(clock, field_trials);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_impl.h b/third_party/libwebrtc/modules/video_coding/video_coding_impl.h
new file mode 100644
index 0000000000..927b2da4b8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_impl.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
+#define MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "modules/video_coding/frame_buffer.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/jitter_buffer.h"
+#include "modules/video_coding/receiver.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/one_time_event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class VideoBitrateAllocator;
+class VideoBitrateAllocationObserver;
+
+namespace vcm {
+
+class VCMProcessTimer {
+ public:
+ static const int64_t kDefaultProcessIntervalMs = 1000;
+
+ VCMProcessTimer(int64_t periodMs, Clock* clock)
+ : _clock(clock),
+ _periodMs(periodMs),
+ _latestMs(_clock->TimeInMilliseconds()) {}
+ int64_t Period() const;
+ int64_t TimeUntilProcess() const;
+ void Processed();
+
+ private:
+ Clock* _clock;
+ int64_t _periodMs;
+ int64_t _latestMs;
+};
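+
+// Illustrative usage sketch (not part of the original file): callers poll the
+// timer from their periodic processing loop, as VideoReceiver::Process() does
+// in video_receiver.cc; `DoPeriodicWork` is a placeholder:
+//
+//   VCMProcessTimer timer(/*periodMs=*/1000, clock);
+//   if (timer.TimeUntilProcess() == 0) {
+//     timer.Processed();
+//     DoPeriodicWork();
+//   }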
+
+class DEPRECATED_VCMDecoderDataBase {
+ public:
+ DEPRECATED_VCMDecoderDataBase();
+ DEPRECATED_VCMDecoderDataBase(const DEPRECATED_VCMDecoderDataBase&) = delete;
+ DEPRECATED_VCMDecoderDataBase& operator=(
+ const DEPRECATED_VCMDecoderDataBase&) = delete;
+ ~DEPRECATED_VCMDecoderDataBase() = default;
+
+ // Returns a pointer to the previously registered decoder or nullptr if none
+ // was registered for the `payload_type`.
+ VideoDecoder* DeregisterExternalDecoder(uint8_t payload_type);
+ void RegisterExternalDecoder(uint8_t payload_type,
+ VideoDecoder* external_decoder);
+ bool IsExternalDecoderRegistered(uint8_t payload_type) const;
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings);
+ bool DeregisterReceiveCodec(uint8_t payload_type);
+
+  // Returns the decoder for frame.PayloadType, with its decoded-frame
+  // callback set to `decoded_frame_callback`. If no such decoder exists yet,
+  // an instance is created and initialized. Returns nullptr if no decoder
+  // could be found or created for the payload type.
+ VCMGenericDecoder* GetDecoder(
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback);
+
+ private:
+ void CreateAndInitDecoder(const VCMEncodedFrame& frame)
+ RTC_RUN_ON(decoder_sequence_checker_);
+
+ SequenceChecker decoder_sequence_checker_;
+
+ absl::optional<uint8_t> current_payload_type_;
+ absl::optional<VCMGenericDecoder> current_decoder_
+ RTC_GUARDED_BY(decoder_sequence_checker_);
+  // Initialization parameters for decoders, keyed by payload type.
+ std::map<uint8_t, VideoDecoder::Settings> decoder_settings_;
+ // Decoders keyed by payload type.
+ std::map<uint8_t, VideoDecoder*> decoders_
+ RTC_GUARDED_BY(decoder_sequence_checker_);
+};
+
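+// Threading model, as implied by the sequence checkers and RTC_DCHECK_RUN_ON
+// calls in video_receiver.cc: callbacks and codecs are registered on the
+// construction thread, IncomingPacket() and Process() run on the module
+// thread, and Decode() runs on the decoder thread.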
+class VideoReceiver {
+ public:
+ VideoReceiver(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials);
+ ~VideoReceiver();
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& settings);
+
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType);
+ int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
+ int32_t RegisterFrameTypeCallback(VCMFrameTypeCallback* frameTypeCallback);
+ int32_t RegisterPacketRequestCallback(VCMPacketRequestCallback* callback);
+
+ int32_t Decode(uint16_t maxWaitTimeMs);
+
+ int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header);
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+
+ void Process();
+
+ protected:
+ int32_t Decode(const webrtc::VCMEncodedFrame& frame);
+ int32_t RequestKeyFrame();
+
+ private:
+  // Used for DCHECKing thread correctness.
+  // In builds where DCHECKs are enabled, this returns false before
+  // DecoderThreadStarting is called, then true until DecoderThreadStopped
+  // is called.
+  // In builds where DCHECKs aren't enabled, it always returns true.
+ bool IsDecoderThreadRunning();
+
+ SequenceChecker construction_thread_checker_;
+ SequenceChecker decoder_thread_checker_;
+ SequenceChecker module_thread_checker_;
+ Clock* const clock_;
+ Mutex process_mutex_;
+ VCMTiming* _timing;
+ VCMReceiver _receiver;
+ VCMDecodedFrameCallback _decodedFrameCallback;
+
+  // These callbacks are set on the construction thread before the module
+  // thread is attached or decoding starts, so a lock is not required.
+ VCMFrameTypeCallback* _frameTypeCallback;
+ VCMPacketRequestCallback* _packetRequestCallback;
+
+ // Used on both the module and decoder thread.
+ bool _scheduleKeyRequest RTC_GUARDED_BY(process_mutex_);
+ bool drop_frames_until_keyframe_ RTC_GUARDED_BY(process_mutex_);
+
+  // Modified on the construction thread while not attached to the process
+  // thread. Once attached to the process thread, its value is only read,
+  // so a lock is not required.
+ size_t max_nack_list_size_;
+
+ // Callbacks are set before the decoder thread starts.
+ // Once the decoder thread has been started, usage of `_codecDataBase` moves
+ // over to the decoder thread.
+ DEPRECATED_VCMDecoderDataBase _codecDataBase;
+
+ VCMProcessTimer _retransmissionTimer RTC_GUARDED_BY(module_thread_checker_);
+ VCMProcessTimer _keyRequestTimer RTC_GUARDED_BY(module_thread_checker_);
+ ThreadUnsafeOneTimeEvent first_frame_received_
+ RTC_GUARDED_BY(decoder_thread_checker_);
+};
+
+} // namespace vcm
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
diff --git a/third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build
new file mode 100644
index 0000000000..1de983a786
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_coding_utility_gn/moz.build
@@ -0,0 +1,243 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/decoded_frames_history.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/frame_dropper.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/framerate_controller_deprecated.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/ivf_file_reader.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/ivf_file_writer.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/qp_parser.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/quality_scaler.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/simulcast_rate_allocator.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/simulcast_utility.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/vp8_header_parser.cc",
+ "/third_party/libwebrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_coding_utility_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver.cc b/third_party/libwebrtc/modules/video_coding/video_receiver.cc
new file mode 100644
index 0000000000..38b70f87cd
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver.cc
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "api/rtp_headers.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/internal_defines.h"
+#include "modules/video_coding/jitter_buffer.h"
+#include "modules/video_coding/media_opt_util.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/receiver.h"
+#include "modules/video_coding/timing/timing.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/one_time_event.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace vcm {
+
+VideoReceiver::VideoReceiver(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ _timing(timing),
+ _receiver(_timing, clock_, field_trials),
+ _decodedFrameCallback(_timing, clock_, field_trials),
+ _frameTypeCallback(nullptr),
+ _packetRequestCallback(nullptr),
+ _scheduleKeyRequest(false),
+ drop_frames_until_keyframe_(false),
+ max_nack_list_size_(0),
+ _codecDataBase(),
+ _retransmissionTimer(10, clock_),
+ _keyRequestTimer(500, clock_) {
+ decoder_thread_checker_.Detach();
+ module_thread_checker_.Detach();
+}
+
+VideoReceiver::~VideoReceiver() {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+}
+
+void VideoReceiver::Process() {
+ RTC_DCHECK_RUN_ON(&module_thread_checker_);
+
+ // Key frame requests
+ if (_keyRequestTimer.TimeUntilProcess() == 0) {
+ _keyRequestTimer.Processed();
+ bool request_key_frame = _frameTypeCallback != nullptr;
+ if (request_key_frame) {
+ MutexLock lock(&process_mutex_);
+ request_key_frame = _scheduleKeyRequest;
+ }
+ if (request_key_frame)
+ RequestKeyFrame();
+ }
+
+ // Packet retransmission requests
+ // TODO(holmer): Add API for changing Process interval and make sure it's
+ // disabled when NACK is off.
+ if (_retransmissionTimer.TimeUntilProcess() == 0) {
+ _retransmissionTimer.Processed();
+ bool callback_registered = _packetRequestCallback != nullptr;
+ uint16_t length = max_nack_list_size_;
+ if (callback_registered && length > 0) {
+ // Collect sequence numbers from the default receiver.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nackList = _receiver.NackList(&request_key_frame);
+ int32_t ret = VCM_OK;
+ if (request_key_frame) {
+ ret = RequestKeyFrame();
+ }
+ if (ret == VCM_OK && !nackList.empty()) {
+ MutexLock lock(&process_mutex_);
+ if (_packetRequestCallback != nullptr) {
+ _packetRequestCallback->ResendPackets(&nackList[0], nackList.size());
+ }
+ }
+ }
+ }
+}
+
+// Register a receive callback. It will be called whenever there is a new
+// frame ready for rendering.
+int32_t VideoReceiver::RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ // This value is set before the decoder thread starts and unset after
+ // the decoder thread has been stopped.
+ _decodedFrameCallback.SetUserReceiveCallback(receiveCallback);
+ return VCM_OK;
+}
+
+// Register an externally defined decoder object.
+void VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ if (externalDecoder == nullptr) {
+ RTC_CHECK(_codecDataBase.DeregisterExternalDecoder(payloadType));
+ return;
+ }
+ _codecDataBase.RegisterExternalDecoder(payloadType, externalDecoder);
+}
+
+// Register a frame type request callback.
+int32_t VideoReceiver::RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ // This callback is used on the module thread, but since we don't get
+ // callbacks on the module thread while the decoder thread isn't running
+ // (and this function must not be called when the decoder is running),
+ // we don't need a lock here.
+ _frameTypeCallback = frameTypeCallback;
+ return VCM_OK;
+}
+
+int32_t VideoReceiver::RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ // This callback is used on the module thread, but since we don't get
+ // callbacks on the module thread while the decoder thread isn't running
+ // (and this function must not be called when the decoder is running),
+ // we don't need a lock here.
+ _packetRequestCallback = callback;
+ return VCM_OK;
+}
+
+// Decodes the next frame, blocking for at most `maxWaitTimeMs`. Should be
+// called as often as possible to get the most out of the decoder.
+int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
+ RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
+ VCMEncodedFrame* frame = _receiver.FrameForDecoding(maxWaitTimeMs, true);
+
+ if (!frame)
+ return VCM_FRAME_NOT_READY;
+
+ bool drop_frame = false;
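+  // `drop_frames_until_keyframe_` is set by IncomingPacket() when the jitter
+  // buffer signals a flush; until a key frame arrives, delta frames are
+  // dropped and another key frame request is scheduled.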
+ {
+ MutexLock lock(&process_mutex_);
+ if (drop_frames_until_keyframe_) {
+      // Still getting delta frames; schedule another key frame request as if
+      // the decode had failed.
+ if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
+ drop_frame = true;
+ _scheduleKeyRequest = true;
+ } else {
+ drop_frames_until_keyframe_ = false;
+ }
+ }
+ }
+
+ if (drop_frame) {
+ _receiver.ReleaseFrame(frame);
+ return VCM_FRAME_NOT_READY;
+ }
+
+  // If this frame was too late, adjust the current delay accordingly.
+ if (frame->RenderTimeMs() > 0)
+ _timing->UpdateCurrentDelay(Timestamp::Millis(frame->RenderTimeMs()),
+ clock_->CurrentTime());
+
+ if (first_frame_received_()) {
+ RTC_LOG(LS_INFO) << "Received first complete decodable video frame";
+ }
+
+ const int32_t ret = Decode(*frame);
+ _receiver.ReleaseFrame(frame);
+ return ret;
+}
+
+int32_t VideoReceiver::RequestKeyFrame() {
+ RTC_DCHECK_RUN_ON(&module_thread_checker_);
+
+ TRACE_EVENT0("webrtc", "RequestKeyFrame");
+ if (_frameTypeCallback != nullptr) {
+ const int32_t ret = _frameTypeCallback->RequestKeyFrame();
+ if (ret < 0) {
+ return ret;
+ }
+ MutexLock lock(&process_mutex_);
+ _scheduleKeyRequest = false;
+ } else {
+ return VCM_MISSING_CALLBACK;
+ }
+ return VCM_OK;
+}
+
+// Must be called from inside the receive-side critical section.
+int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
+ RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
+ TRACE_EVENT0("webrtc", "VideoReceiver::Decode");
+  // Change decoder if payload type has changed.
+ VCMGenericDecoder* decoder =
+ _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
+ if (decoder == nullptr) {
+ return VCM_NO_CODEC_REGISTERED;
+ }
+ return decoder->Decode(frame, clock_->CurrentTime());
+}
+
+// Register possible receive codecs; can be called multiple times.
+void VideoReceiver::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ _codecDataBase.RegisterReceiveCodec(payload_type, settings);
+}
+
+// Incoming packet from the network, parsed and ready for decode; non-blocking.
+int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const RTPHeader& rtp_header,
+ const RTPVideoHeader& video_header) {
+ RTC_DCHECK_RUN_ON(&module_thread_checker_);
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
+ rtp_header.sequenceNumber);
+ }
+ if (incomingPayload == nullptr) {
+ // The jitter buffer doesn't handle non-zero payload lengths for packets
+ // without payload.
+ // TODO(holmer): We should fix this in the jitter buffer.
+ payloadLength = 0;
+ }
+  // Callers don't provide any NTP time.
+ const VCMPacket packet(incomingPayload, payloadLength, rtp_header,
+ video_header, /*ntp_time_ms=*/0,
+ clock_->CurrentTime());
+ int32_t ret = _receiver.InsertPacket(packet);
+
+ // TODO(holmer): Investigate if this somehow should use the key frame
+ // request scheduling to throttle the requests.
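+  // A flush indicator means the jitter buffer state was reset, so delta
+  // frames can no longer be decoded: drop frames until the next key frame
+  // and request one from the sender immediately.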
+ if (ret == VCM_FLUSH_INDICATOR) {
+ {
+ MutexLock lock(&process_mutex_);
+ drop_frames_until_keyframe_ = true;
+ }
+ RequestKeyFrame();
+ } else if (ret < 0) {
+ return ret;
+ }
+ return VCM_OK;
+}
+
+void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ RTC_DCHECK_RUN_ON(&construction_thread_checker_);
+ if (max_nack_list_size != 0) {
+ max_nack_list_size_ = max_nack_list_size;
+ }
+ _receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
+}
+
+} // namespace vcm
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver2.cc b/third_party/libwebrtc/modules/video_coding/video_receiver2.cc
new file mode 100644
index 0000000000..0751869a98
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver2.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/video_receiver2.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+VideoReceiver2::VideoReceiver2(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ decoded_frame_callback_(timing, clock_, field_trials),
+ codec_database_() {
+ decoder_sequence_checker_.Detach();
+}
+
+VideoReceiver2::~VideoReceiver2() {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+}
+
+// Register a receive callback. It will be called whenever there is a new
+// frame ready for rendering.
+int32_t VideoReceiver2::RegisterReceiveCallback(
+ VCMReceiveCallback* receive_callback) {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ // This value is set before the decoder thread starts and unset after
+ // the decoder thread has been stopped.
+ decoded_frame_callback_.SetUserReceiveCallback(receive_callback);
+ return VCM_OK;
+}
+
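+// Registers an external decoder for `payload_type`. Passing a null `decoder`
+// deregisters (and destroys) whatever decoder is currently registered for
+// that payload type.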
+void VideoReceiver2::RegisterExternalDecoder(
+ std::unique_ptr<VideoDecoder> decoder,
+ uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ RTC_DCHECK(decoded_frame_callback_.UserReceiveCallback());
+
+ if (decoder) {
+ RTC_DCHECK(!codec_database_.IsExternalDecoderRegistered(payload_type));
+ codec_database_.RegisterExternalDecoder(payload_type, std::move(decoder));
+ } else {
+ codec_database_.DeregisterExternalDecoder(payload_type);
+ }
+}
+
+bool VideoReceiver2::IsExternalDecoderRegistered(uint8_t payload_type) const {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ return codec_database_.IsExternalDecoderRegistered(payload_type);
+}
+
+// Must be called from inside the receive-side critical section.
+int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) {
+ RTC_DCHECK_RUN_ON(&decoder_sequence_checker_);
+ TRACE_EVENT0("webrtc", "VideoReceiver2::Decode");
+ // Change decoder if payload type has changed.
+ VCMGenericDecoder* decoder =
+ codec_database_.GetDecoder(*frame, &decoded_frame_callback_);
+ if (decoder == nullptr) {
+ return VCM_NO_CODEC_REGISTERED;
+ }
+ return decoder->Decode(*frame, clock_->CurrentTime());
+}
+
+// Register possible receive codecs, can be called multiple times.
+// Called before decoder thread is started.
+void VideoReceiver2::RegisterReceiveCodec(
+ uint8_t payload_type,
+ const VideoDecoder::Settings& settings) {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ codec_database_.RegisterReceiveCodec(payload_type, settings);
+}
+
+void VideoReceiver2::DeregisterReceiveCodec(uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ codec_database_.DeregisterReceiveCodec(payload_type);
+}
+
+void VideoReceiver2::DeregisterReceiveCodecs() {
+ RTC_DCHECK_RUN_ON(&construction_sequence_checker_);
+ codec_database_.DeregisterReceiveCodecs();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver2.h b/third_party/libwebrtc/modules/video_coding/video_receiver2.h
new file mode 100644
index 0000000000..4457a5b5b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver2.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_
+#define MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/generic_decoder.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+// This class is a copy of vcm::VideoReceiver, trimmed down to what is used by
+// VideoReceiveStream, with the aim of trimming it down further and ultimately
+// deleting it. Doing this incrementally with the original VideoReceiver class
+// is difficult, since that class is used by the legacy VideoCodingModule API.
+class VideoReceiver2 {
+ public:
+ VideoReceiver2(Clock* clock,
+ VCMTiming* timing,
+ const FieldTrialsView& field_trials);
+ ~VideoReceiver2();
+
+ void RegisterReceiveCodec(uint8_t payload_type,
+ const VideoDecoder::Settings& decoder_settings);
+ void DeregisterReceiveCodec(uint8_t payload_type);
+ void DeregisterReceiveCodecs();
+
+ void RegisterExternalDecoder(std::unique_ptr<VideoDecoder> decoder,
+ uint8_t payload_type);
+
+ bool IsExternalDecoderRegistered(uint8_t payload_type) const;
+ int32_t RegisterReceiveCallback(VCMReceiveCallback* receive_callback);
+
+ int32_t Decode(const VCMEncodedFrame* frame);
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker construction_sequence_checker_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker decoder_sequence_checker_;
+ Clock* const clock_;
+ VCMDecodedFrameCallback decoded_frame_callback_;
+  // Callbacks are set before the decoder thread starts.
+  // Once the decoder thread has been started, usage of `codec_database_`
+  // moves over to the decoder thread.
+ VCMDecoderDatabase codec_database_;
+};
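+
+// Illustrative usage sketch (mirrors the fixture in
+// video_receiver2_unittest.cc); note that the receive callback must be
+// registered before an external decoder:
+//
+//   VideoReceiver2 receiver(&clock, &timing, field_trials);
+//   receiver.RegisterReceiveCallback(&receive_callback);
+//   receiver.RegisterReceiveCodec(payload_type, decoder_settings);
+//   receiver.RegisterExternalDecoder(std::move(decoder), payload_type);
+//   receiver.Decode(&encoded_frame);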
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver2_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_receiver2_unittest.cc
new file mode 100644
index 0000000000..6edf1230d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver2_unittest.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/video_receiver2.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/test/mock_video_decoder.h"
+#include "api/units/timestamp.h"
+#include "api/video/encoded_frame.h"
+#include "common_video/test/utilities.h"
+#include "modules/video_coding/decoder_database.h"
+#include "modules/video_coding/timing/timing.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+class MockVCMReceiveCallback : public VCMReceiveCallback {
+ public:
+ MockVCMReceiveCallback() = default;
+
+ MOCK_METHOD(
+ int32_t,
+ FrameToRender,
+ (VideoFrame&, absl::optional<uint8_t>, TimeDelta, VideoContentType),
+ (override));
+ MOCK_METHOD(void, OnIncomingPayloadType, (int), (override));
+ MOCK_METHOD(void,
+ OnDecoderInfoChanged,
+ (const VideoDecoder::DecoderInfo&),
+ (override));
+};
+
+class TestEncodedFrame : public EncodedFrame {
+ public:
+ explicit TestEncodedFrame(int payload_type) {
+ _payloadType = payload_type;
+ SetPacketInfos(CreatePacketInfos(3));
+ }
+
+ void SetReceivedTime(webrtc::Timestamp received_time) {
+ received_time_ = received_time;
+ }
+
+ int64_t ReceivedTime() const override { return received_time_.ms(); }
+
+ int64_t RenderTime() const override { return _renderTimeMs; }
+
+ private:
+ webrtc::Timestamp received_time_ = webrtc::Timestamp::Millis(0);
+};
+
+class VideoReceiver2Test : public ::testing::Test {
+ protected:
+ VideoReceiver2Test() {
+ receiver_.RegisterReceiveCallback(&receive_callback_);
+ }
+
+ void RegisterReceiveCodecSettings(
+ int payload_type,
+ VideoCodecType codec_type = kVideoCodecVP8) {
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(codec_type);
+ settings.set_max_render_resolution({10, 10});
+ settings.set_number_of_cores(4);
+ receiver_.RegisterReceiveCodec(payload_type, settings);
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClock clock_{Timestamp::Millis(1337)};
+ VCMTiming timing_{&clock_, field_trials_};
+ NiceMock<MockVCMReceiveCallback> receive_callback_;
+ VideoReceiver2 receiver_{&clock_, &timing_, field_trials_};
+};
+
+TEST_F(VideoReceiver2Test, RegisterExternalDecoder) {
+ constexpr int kPayloadType = 1;
+ ASSERT_FALSE(receiver_.IsExternalDecoderRegistered(kPayloadType));
+
+ // Register a decoder, check for correctness, then unregister and check again.
+ auto decoder = std::make_unique<NiceMock<MockVideoDecoder>>();
+ bool decoder_deleted = false;
+ EXPECT_CALL(*decoder, Destruct).WillOnce([&decoder_deleted] {
+ decoder_deleted = true;
+ });
+ receiver_.RegisterExternalDecoder(std::move(decoder), kPayloadType);
+ EXPECT_TRUE(receiver_.IsExternalDecoderRegistered(kPayloadType));
+ receiver_.RegisterExternalDecoder(nullptr, kPayloadType);
+ EXPECT_TRUE(decoder_deleted);
+ EXPECT_FALSE(receiver_.IsExternalDecoderRegistered(kPayloadType));
+}
+
+TEST_F(VideoReceiver2Test, RegisterReceiveCodecs) {
+ constexpr int kPayloadType = 1;
+
+ RegisterReceiveCodecSettings(kPayloadType);
+
+ TestEncodedFrame frame(kPayloadType);
+
+ // A decoder has not been registered yet, so an attempt to decode should fail.
+ EXPECT_EQ(receiver_.Decode(&frame), VCM_NO_CODEC_REGISTERED);
+
+ // Register a decoder that will accept the Decode operation.
+ auto decoder = std::make_unique<NiceMock<MockVideoDecoder>>();
+ EXPECT_CALL(*decoder, RegisterDecodeCompleteCallback)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*decoder, Decode).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*decoder, Release).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+
+ // Register the decoder. Note that this moves ownership of the mock object
+ // to the `receiver_`.
+ receiver_.RegisterExternalDecoder(std::move(decoder), kPayloadType);
+ EXPECT_TRUE(receiver_.IsExternalDecoderRegistered(kPayloadType));
+
+ EXPECT_CALL(receive_callback_, OnIncomingPayloadType(kPayloadType));
+ EXPECT_CALL(receive_callback_, OnDecoderInfoChanged);
+
+ // Call `Decode`. This triggers the above call expectations.
+ EXPECT_EQ(receiver_.Decode(&frame), VCM_OK);
+
+ // Unregister the decoder and verify.
+ receiver_.RegisterExternalDecoder(nullptr, kPayloadType);
+ EXPECT_FALSE(receiver_.IsExternalDecoderRegistered(kPayloadType));
+
+ receiver_.DeregisterReceiveCodec(kPayloadType);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc
new file mode 100644
index 0000000000..fe9674e521
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/video_receiver_unittest.cc
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/test/mock_video_decoder.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/timing/timing.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::NiceMock;
+
+namespace webrtc {
+namespace vcm {
+namespace {
+
+class MockPacketRequestCallback : public VCMPacketRequestCallback {
+ public:
+ MOCK_METHOD(int32_t,
+ ResendPackets,
+ (const uint16_t* sequenceNumbers, uint16_t length),
+ (override));
+};
+
+class MockVCMReceiveCallback : public VCMReceiveCallback {
+ public:
+ MockVCMReceiveCallback() {}
+ virtual ~MockVCMReceiveCallback() {}
+
+ MOCK_METHOD(
+ int32_t,
+ FrameToRender,
+ (VideoFrame&, absl::optional<uint8_t>, TimeDelta, VideoContentType),
+ (override));
+ MOCK_METHOD(void, OnIncomingPayloadType, (int), (override));
+ MOCK_METHOD(void,
+ OnDecoderInfoChanged,
+ (const VideoDecoder::DecoderInfo&),
+ (override));
+};
+
+class TestVideoReceiver : public ::testing::Test {
+ protected:
+ static const int kUnusedPayloadType = 10;
+ static const uint16_t kMaxWaitTimeMs = 100;
+
+ TestVideoReceiver()
+ : clock_(0),
+ timing_(&clock_, field_trials_),
+ receiver_(&clock_, &timing_, field_trials_) {}
+
+ virtual void SetUp() {
+ // Register decoder.
+ receiver_.RegisterExternalDecoder(&decoder_, kUnusedPayloadType);
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(kVideoCodecVP8);
+ receiver_.RegisterReceiveCodec(kUnusedPayloadType, settings);
+
+ // Set protection mode.
+ const size_t kMaxNackListSize = 250;
+ const int kMaxPacketAgeToNack = 450;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
+ EXPECT_EQ(
+ 0, receiver_.RegisterPacketRequestCallback(&packet_request_callback_));
+
+ // Since we call Decode, we need to provide a valid receive callback.
+ // However, for the purposes of these tests, we ignore the callbacks.
+ EXPECT_CALL(receive_callback_, OnIncomingPayloadType(_)).Times(AnyNumber());
+ EXPECT_CALL(receive_callback_, OnDecoderInfoChanged).Times(AnyNumber());
+ receiver_.RegisterReceiveCallback(&receive_callback_);
+ }
+
+ RTPHeader GetDefaultRTPHeader() const {
+ RTPHeader header;
+ header.markerBit = false;
+ header.payloadType = kUnusedPayloadType;
+ header.ssrc = 1;
+ header.headerLength = 12;
+ return header;
+ }
+
+ RTPVideoHeader GetDefaultVp8Header() const {
+ RTPVideoHeader video_header = {};
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ video_header.codec = kVideoCodecVP8;
+ return video_header;
+ }
+
+ void InsertAndVerifyPaddingFrame(const uint8_t* payload,
+ RTPHeader* header,
+ const RTPVideoHeader& video_header) {
+ for (int j = 0; j < 5; ++j) {
+      // Padding-only packets are passed to the VCM with payload size 0.
+ EXPECT_EQ(0, receiver_.IncomingPacket(payload, 0, *header, video_header));
+ ++header->sequenceNumber;
+ }
+ receiver_.Process();
+ EXPECT_CALL(decoder_, Decode(_, _, _)).Times(0);
+ EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_.Decode(kMaxWaitTimeMs));
+ }
+
+ void InsertAndVerifyDecodableFrame(const uint8_t* payload,
+ size_t length,
+ RTPHeader* header,
+ const RTPVideoHeader& video_header) {
+ EXPECT_EQ(0,
+ receiver_.IncomingPacket(payload, length, *header, video_header));
+ ++header->sequenceNumber;
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+
+ receiver_.Process();
+ EXPECT_CALL(decoder_, Decode(_, _, _)).Times(1);
+ EXPECT_EQ(0, receiver_.Decode(kMaxWaitTimeMs));
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClock clock_;
+ NiceMock<MockVideoDecoder> decoder_;
+ NiceMock<MockPacketRequestCallback> packet_request_callback_;
+ VCMTiming timing_;
+ MockVCMReceiveCallback receive_callback_;
+ VideoReceiver receiver_;
+};
+
+TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
+ const size_t kPaddingSize = 220;
+ const uint8_t kPayload[kPaddingSize] = {0};
+ RTPHeader header = GetDefaultRTPHeader();
+ RTPVideoHeader video_header = GetDefaultVp8Header();
+ header.paddingLength = kPaddingSize;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ InsertAndVerifyPaddingFrame(kPayload, &header, video_header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+}
+
+TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
+ const uint8_t kPayload[kFrameSize] = {0};
+ RTPHeader header = GetDefaultRTPHeader();
+ RTPVideoHeader video_header = GetDefaultVp8Header();
+ header.paddingLength = kPaddingSize;
+ video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+
+ // Insert one video frame to get one frame decoded.
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ video_header.is_first_packet_in_frame = true;
+ header.markerBit = true;
+ InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header, video_header);
+
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ video_header.is_first_packet_in_frame = false;
+ header.markerBit = false;
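+  // Each padding frame below spans 5 packets (see
+  // InsertAndVerifyPaddingFrame). Skipping the insert while advancing the
+  // sequence number by 5 at i == 3 loses a whole frame, so 5 packets are
+  // NACKed from i == 4; losing one more packet at i == 5 raises the expected
+  // NACK list size to 6.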
+ // Insert padding frames.
+ for (int i = 0; i < 10; ++i) {
+ // Lose one packet from the 6th frame.
+ if (i == 5) {
+ ++header.sequenceNumber;
+ }
+ // Lose the 4th frame.
+ if (i == 3) {
+ header.sequenceNumber += 5;
+ } else {
+ if (i > 3 && i < 5) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, 5)).Times(1);
+ } else if (i >= 5) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, 6)).Times(1);
+ } else {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ }
+ InsertAndVerifyPaddingFrame(kPayload, &header, video_header);
+ }
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+}
+
+TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
+ const uint8_t kPayload[kFrameSize] = {0};
+ RTPHeader header = GetDefaultRTPHeader();
+ RTPVideoHeader video_header = GetDefaultVp8Header();
+ video_header.is_first_packet_in_frame = false;
+ header.paddingLength = kPaddingSize;
+ auto& vp8_header =
+ video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.pictureId = -1;
+ vp8_header.tl0PicIdx = -1;
+
+ for (int i = 0; i < 3; ++i) {
+ // Insert 2 video frames.
+ for (int j = 0; j < 2; ++j) {
+ if (i == 0 && j == 0) // First frame should be a key frame.
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ else
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ video_header.is_first_packet_in_frame = true;
+ header.markerBit = true;
+ InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header,
+ video_header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+
+    // Insert 2 padding-only frames.
+ video_header.frame_type = VideoFrameType::kEmptyFrame;
+ video_header.is_first_packet_in_frame = false;
+ header.markerBit = false;
+ for (int j = 0; j < 2; ++j) {
+      // InsertAndVerifyPaddingFrame(kPayload, &header, video_header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.timestamp += 3000;
+ }
+ }
+}
+
+} // namespace
+} // namespace vcm
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build
new file mode 100644
index 0000000000..03437c5d57
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_libvpx_interface_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/interface/libvpx_interface.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_libvpx_interface_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build
new file mode 100644
index 0000000000..db366355de
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_gn/moz.build
@@ -0,0 +1,235 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp8_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build
new file mode 100644
index 0000000000..3d5642df00
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_scalability_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/vp8_scalability.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp8_scalability_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build
new file mode 100644
index 0000000000..db3848fb57
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp8_temporal_layers_gn/moz.build
@@ -0,0 +1,237 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp8/temporal_layers_checker.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp8_temporal_layers_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build
new file mode 100644
index 0000000000..f470652e6d
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_gn/moz.build
@@ -0,0 +1,238 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/media/libyuv/",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9.cc",
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp9_gn")
diff --git a/third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build
new file mode 100644
index 0000000000..77a0c38f99
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/webrtc_vp9_helpers_gn/moz.build
@@ -0,0 +1,233 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("webrtc_vp9_helpers_gn")