author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/libwebrtc/video
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr. (tag: upstream/115.7.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/video')
-rw-r--r--  third_party/libwebrtc/video/BUILD.gn  972
-rw-r--r--  third_party/libwebrtc/video/DEPS  17
-rw-r--r--  third_party/libwebrtc/video/OWNERS  6
-rw-r--r--  third_party/libwebrtc/video/adaptation/BUILD.gn  125
-rw-r--r--  third_party/libwebrtc/video/adaptation/OWNERS  3
-rw-r--r--  third_party/libwebrtc/video/adaptation/balanced_constraint.cc  62
-rw-r--r--  third_party/libwebrtc/video/adaptation/balanced_constraint.h  53
-rw-r--r--  third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.cc  83
-rw-r--r--  third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.h  62
-rw-r--r--  third_party/libwebrtc/video/adaptation/bitrate_constraint.cc  87
-rw-r--r--  third_party/libwebrtc/video/adaptation/bitrate_constraint.h  53
-rw-r--r--  third_party/libwebrtc/video/adaptation/bitrate_constraint_unittest.cc  320
-rw-r--r--  third_party/libwebrtc/video/adaptation/encode_usage_resource.cc  105
-rw-r--r--  third_party/libwebrtc/video/adaptation/encode_usage_resource.h  68
-rw-r--r--  third_party/libwebrtc/video/adaptation/overuse_frame_detector.cc  722
-rw-r--r--  third_party/libwebrtc/video/adaptation/overuse_frame_detector.h  172
-rw-r--r--  third_party/libwebrtc/video/adaptation/overuse_frame_detector_unittest.cc  1023
-rw-r--r--  third_party/libwebrtc/video/adaptation/pixel_limit_resource.cc  101
-rw-r--r--  third_party/libwebrtc/video/adaptation/pixel_limit_resource.h  60
-rw-r--r--  third_party/libwebrtc/video/adaptation/pixel_limit_resource_unittest.cc  147
-rw-r--r--  third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.cc  88
-rw-r--r--  third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.h  71
-rw-r--r--  third_party/libwebrtc/video/adaptation/quality_scaler_resource.cc  99
-rw-r--r--  third_party/libwebrtc/video/adaptation/quality_scaler_resource.h  59
-rw-r--r--  third_party/libwebrtc/video/adaptation/quality_scaler_resource_unittest.cc  76
-rw-r--r--  third_party/libwebrtc/video/adaptation/video_adaptation_gn/moz.build  241
-rw-r--r--  third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.cc  63
-rw-r--r--  third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.h  55
-rw-r--r--  third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.cc  851
-rw-r--r--  third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.h  238
-rw-r--r--  third_party/libwebrtc/video/alignment_adjuster.cc  126
-rw-r--r--  third_party/libwebrtc/video/alignment_adjuster.h  42
-rw-r--r--  third_party/libwebrtc/video/alignment_adjuster_unittest.cc  189
-rw-r--r--  third_party/libwebrtc/video/buffered_frame_decryptor.cc  123
-rw-r--r--  third_party/libwebrtc/video/buffered_frame_decryptor.h  105
-rw-r--r--  third_party/libwebrtc/video/buffered_frame_decryptor_unittest.cc  230
-rw-r--r--  third_party/libwebrtc/video/call_stats2.cc  168
-rw-r--r--  third_party/libwebrtc/video/call_stats2.h  135
-rw-r--r--  third_party/libwebrtc/video/call_stats2_unittest.cc  312
-rw-r--r--  third_party/libwebrtc/video/config/BUILD.gn  99
-rw-r--r--  third_party/libwebrtc/video/config/encoder_config_gn/moz.build  225
-rw-r--r--  third_party/libwebrtc/video/config/encoder_stream_factory.cc  465
-rw-r--r--  third_party/libwebrtc/video/config/encoder_stream_factory.h  80
-rw-r--r--  third_party/libwebrtc/video/config/encoder_stream_factory_unittest.cc  83
-rw-r--r--  third_party/libwebrtc/video/config/simulcast.cc  497
-rw-r--r--  third_party/libwebrtc/video/config/simulcast.h  72
-rw-r--r--  third_party/libwebrtc/video/config/simulcast_unittest.cc  525
-rw-r--r--  third_party/libwebrtc/video/config/streams_config_gn/moz.build  234
-rw-r--r--  third_party/libwebrtc/video/config/video_encoder_config.cc  133
-rw-r--r--  third_party/libwebrtc/video/config/video_encoder_config.h  208
-rw-r--r--  third_party/libwebrtc/video/cpu_scaling_tests.cc  144
-rw-r--r--  third_party/libwebrtc/video/decode_synchronizer.cc  190
-rw-r--r--  third_party/libwebrtc/video/decode_synchronizer.h  137
-rw-r--r--  third_party/libwebrtc/video/decode_synchronizer_gn/moz.build  232
-rw-r--r--  third_party/libwebrtc/video/decode_synchronizer_unittest.cc  252
-rw-r--r--  third_party/libwebrtc/video/encoder_bitrate_adjuster.cc  338
-rw-r--r--  third_party/libwebrtc/video/encoder_bitrate_adjuster.h  75
-rw-r--r--  third_party/libwebrtc/video/encoder_bitrate_adjuster_unittest.cc  506
-rw-r--r--  third_party/libwebrtc/video/encoder_overshoot_detector.cc  204
-rw-r--r--  third_party/libwebrtc/video/encoder_overshoot_detector.h  78
-rw-r--r--  third_party/libwebrtc/video/encoder_overshoot_detector_unittest.cc  166
-rw-r--r--  third_party/libwebrtc/video/encoder_rtcp_feedback.cc  139
-rw-r--r--  third_party/libwebrtc/video/encoder_rtcp_feedback.h  69
-rw-r--r--  third_party/libwebrtc/video/encoder_rtcp_feedback_unittest.cc  61
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/bandwidth_tests.cc  402
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/call_operation_tests.cc  195
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/codec_tests.cc  288
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/config_tests.cc  113
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/extended_reports_tests.cc  264
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/fec_tests.cc  502
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/frame_encryption_tests.cc  91
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/histogram_tests.cc  317
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/multi_codec_receive_tests.cc  291
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.cc  180
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.h  64
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/multi_stream_tests.cc  92
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/network_state_tests.cc  428
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/resolution_bitrate_limits_tests.cc  481
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/retransmission_tests.cc  513
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/rtp_rtcp_tests.cc  551
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/ssrc_tests.cc  325
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/stats_tests.cc  733
-rw-r--r--  third_party/libwebrtc/video/end_to_end_tests/transport_feedback_tests.cc  493
-rw-r--r--  third_party/libwebrtc/video/frame_cadence_adapter.cc  803
-rw-r--r--  third_party/libwebrtc/video/frame_cadence_adapter.h  122
-rw-r--r--  third_party/libwebrtc/video/frame_cadence_adapter_gn/moz.build  232
-rw-r--r--  third_party/libwebrtc/video/frame_cadence_adapter_unittest.cc  1101
-rw-r--r--  third_party/libwebrtc/video/frame_decode_scheduler.h  52
-rw-r--r--  third_party/libwebrtc/video/frame_decode_scheduler_gn/moz.build  216
-rw-r--r--  third_party/libwebrtc/video/frame_decode_timing.cc  60
-rw-r--r--  third_party/libwebrtc/video/frame_decode_timing.h  54
-rw-r--r--  third_party/libwebrtc/video/frame_decode_timing_gn/moz.build  232
-rw-r--r--  third_party/libwebrtc/video/frame_decode_timing_unittest.cc  147
-rw-r--r--  third_party/libwebrtc/video/frame_dumping_decoder.cc  91
-rw-r--r--  third_party/libwebrtc/video/frame_dumping_decoder.h  28
-rw-r--r--  third_party/libwebrtc/video/frame_dumping_decoder_gn/moz.build  233
-rw-r--r--  third_party/libwebrtc/video/frame_encode_metadata_writer.cc  278
-rw-r--r--  third_party/libwebrtc/video/frame_encode_metadata_writer.h  82
-rw-r--r--  third_party/libwebrtc/video/frame_encode_metadata_writer_unittest.cc  471
-rw-r--r--  third_party/libwebrtc/video/full_stack_tests.cc  1189
-rwxr-xr-x  third_party/libwebrtc/video/full_stack_tests_plot.py  469
-rw-r--r--  third_party/libwebrtc/video/g3doc/adaptation.md  114
-rw-r--r--  third_party/libwebrtc/video/g3doc/stats.md  215
-rw-r--r--  third_party/libwebrtc/video/pc_full_stack_tests.cc  1833
-rw-r--r--  third_party/libwebrtc/video/picture_id_tests.cc  428
-rw-r--r--  third_party/libwebrtc/video/quality_limitation_reason_tracker.cc  52
-rw-r--r--  third_party/libwebrtc/video/quality_limitation_reason_tracker.h  58
-rw-r--r--  third_party/libwebrtc/video/quality_limitation_reason_tracker_unittest.cc  115
-rw-r--r--  third_party/libwebrtc/video/quality_scaling_tests.cc  613
-rw-r--r--  third_party/libwebrtc/video/quality_threshold.cc  104
-rw-r--r--  third_party/libwebrtc/video/quality_threshold.h  53
-rw-r--r--  third_party/libwebrtc/video/quality_threshold_unittest.cc  133
-rw-r--r--  third_party/libwebrtc/video/receive_statistics_proxy2.cc  1037
-rw-r--r--  third_party/libwebrtc/video/receive_statistics_proxy2.h  223
-rw-r--r--  third_party/libwebrtc/video/receive_statistics_proxy2_unittest.cc  1818
-rw-r--r--  third_party/libwebrtc/video/render/BUILD.gn  51
-rw-r--r--  third_party/libwebrtc/video/render/incoming_video_stream.cc  66
-rw-r--r--  third_party/libwebrtc/video/render/incoming_video_stream.h  48
-rw-r--r--  third_party/libwebrtc/video/render/incoming_video_stream_gn/moz.build  225
-rw-r--r--  third_party/libwebrtc/video/render/video_render_frames.cc  116
-rw-r--r--  third_party/libwebrtc/video/render/video_render_frames.h  55
-rw-r--r--  third_party/libwebrtc/video/render/video_render_frames_gn/moz.build  225
-rw-r--r--  third_party/libwebrtc/video/report_block_stats.cc  65
-rw-r--r--  third_party/libwebrtc/video/report_block_stats.h  58
-rw-r--r--  third_party/libwebrtc/video/report_block_stats_unittest.cc  63
-rw-r--r--  third_party/libwebrtc/video/rtp_streams_synchronizer2.cc  219
-rw-r--r--  third_party/libwebrtc/video/rtp_streams_synchronizer2.h  75
-rw-r--r--  third_party/libwebrtc/video/rtp_video_stream_receiver2.cc  1317
-rw-r--r--  third_party/libwebrtc/video/rtp_video_stream_receiver2.h  455
-rw-r--r--  third_party/libwebrtc/video/rtp_video_stream_receiver2_unittest.cc  1233
-rw-r--r--  third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc  129
-rw-r--r--  third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h  74
-rw-r--r--  third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc  167
-rw-r--r--  third_party/libwebrtc/video/screenshare_loopback.cc  387
-rw-r--r--  third_party/libwebrtc/video/send_delay_stats.cc  120
-rw-r--r--  third_party/libwebrtc/video/send_delay_stats.h  94
-rw-r--r--  third_party/libwebrtc/video/send_delay_stats_unittest.cc  133
-rw-r--r--  third_party/libwebrtc/video/send_statistics_proxy.cc  1512
-rw-r--r--  third_party/libwebrtc/video/send_statistics_proxy.h  380
-rw-r--r--  third_party/libwebrtc/video/send_statistics_proxy_unittest.cc  3123
-rw-r--r--  third_party/libwebrtc/video/stats_counter.cc  463
-rw-r--r--  third_party/libwebrtc/video/stats_counter.h  296
-rw-r--r--  third_party/libwebrtc/video/stats_counter_unittest.cc  602
-rw-r--r--  third_party/libwebrtc/video/stream_synchronization.cc  195
-rw-r--r--  third_party/libwebrtc/video/stream_synchronization.h  71
-rw-r--r--  third_party/libwebrtc/video/stream_synchronization_unittest.cc  532
-rw-r--r--  third_party/libwebrtc/video/sv_loopback.cc  719
-rw-r--r--  third_party/libwebrtc/video/task_queue_frame_decode_scheduler.cc  77
-rw-r--r--  third_party/libwebrtc/video/task_queue_frame_decode_scheduler.h  48
-rw-r--r--  third_party/libwebrtc/video/task_queue_frame_decode_scheduler_gn/moz.build  232
-rw-r--r--  third_party/libwebrtc/video/task_queue_frame_decode_scheduler_unittest.cc  102
-rw-r--r--  third_party/libwebrtc/video/test/mock_video_stream_encoder.h  76
-rw-r--r--  third_party/libwebrtc/video/transport_adapter.cc  50
-rw-r--r--  third_party/libwebrtc/video/transport_adapter.h  43
-rw-r--r--  third_party/libwebrtc/video/unique_timestamp_counter.cc  41
-rw-r--r--  third_party/libwebrtc/video/unique_timestamp_counter.h  44
-rw-r--r--  third_party/libwebrtc/video/unique_timestamp_counter_gn/moz.build  217
-rw-r--r--  third_party/libwebrtc/video/unique_timestamp_counter_unittest.cc  52
-rw-r--r--  third_party/libwebrtc/video/video_analyzer.cc  1047
-rw-r--r--  third_party/libwebrtc/video/video_analyzer.h  322
-rw-r--r--  third_party/libwebrtc/video/video_gn/moz.build  255
-rw-r--r--  third_party/libwebrtc/video/video_loopback.cc  456
-rw-r--r--  third_party/libwebrtc/video/video_loopback.h  19
-rw-r--r--  third_party/libwebrtc/video/video_loopback_main.cc  15
-rw-r--r--  third_party/libwebrtc/video/video_loopback_main.mm  17
-rw-r--r--  third_party/libwebrtc/video/video_quality_observer2.cc  296
-rw-r--r--  third_party/libwebrtc/video/video_quality_observer2.h  101
-rw-r--r--  third_party/libwebrtc/video/video_quality_test.cc  1577
-rw-r--r--  third_party/libwebrtc/video/video_quality_test.h  145
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream2.cc  1112
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream2.h  345
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream2_unittest.cc  1219
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream_timeout_tracker.cc  98
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream_timeout_tracker.h  70
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream_timeout_tracker_gn/moz.build  232
-rw-r--r--  third_party/libwebrtc/video/video_receive_stream_timeout_tracker_unittest.cc  94
-rw-r--r--  third_party/libwebrtc/video/video_send_stream.cc  359
-rw-r--r--  third_party/libwebrtc/video/video_send_stream.h  125
-rw-r--r--  third_party/libwebrtc/video/video_send_stream_impl.cc  625
-rw-r--r--  third_party/libwebrtc/video/video_send_stream_impl.h  180
-rw-r--r--  third_party/libwebrtc/video/video_send_stream_impl_unittest.cc  1036
-rw-r--r--  third_party/libwebrtc/video/video_send_stream_tests.cc  4289
-rw-r--r--  third_party/libwebrtc/video/video_source_sink_controller.cc  193
-rw-r--r--  third_party/libwebrtc/video/video_source_sink_controller.h  102
-rw-r--r--  third_party/libwebrtc/video/video_source_sink_controller_unittest.cc  199
-rw-r--r--  third_party/libwebrtc/video/video_stream_buffer_controller.cc  422
-rw-r--r--  third_party/libwebrtc/video/video_stream_buffer_controller.h  122
-rw-r--r--  third_party/libwebrtc/video/video_stream_buffer_controller_gn/moz.build  233
-rw-r--r--  third_party/libwebrtc/video/video_stream_buffer_controller_unittest.cc  922
-rw-r--r--  third_party/libwebrtc/video/video_stream_decoder2.cc  70
-rw-r--r--  third_party/libwebrtc/video/video_stream_decoder2.h  61
-rw-r--r--  third_party/libwebrtc/video/video_stream_decoder_impl.cc  293
-rw-r--r--  third_party/libwebrtc/video/video_stream_decoder_impl.h  128
-rw-r--r--  third_party/libwebrtc/video/video_stream_decoder_impl_unittest.cc  221
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder.cc  2597
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder.h  494
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder_impl_gn/moz.build  238
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder_interface.h  147
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder_interface_gn/moz.build  209
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder_observer.h  119
-rw-r--r--  third_party/libwebrtc/video/video_stream_encoder_unittest.cc  9528
201 files changed, 76285 insertions, 0 deletions
diff --git a/third_party/libwebrtc/video/BUILD.gn b/third_party/libwebrtc/video/BUILD.gn
new file mode 100644
index 0000000000..2c0a411e35
--- /dev/null
+++ b/third_party/libwebrtc/video/BUILD.gn
@@ -0,0 +1,972 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+
+rtc_library("video_stream_encoder_interface") {
+ sources = [
+ "video_stream_encoder_interface.h",
+ "video_stream_encoder_observer.h",
+ ]
+ deps = [
+ "../api:fec_controller_api",
+ "../api:rtc_error",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_setparameters_callback",
+ "../api:scoped_refptr",
+ "../api/adaptation:resource_adaptation_api",
+ "../api/units:data_rate",
+ "../api/video:video_adaptation",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_bitrate_allocator",
+ "../api/video:video_codec_constants",
+ "../api/video:video_frame",
+ "../api/video:video_layers_allocation",
+ "../api/video_codecs:video_codecs_api",
+ "../video/config:encoder_config",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("video") {
+ sources = [
+ "buffered_frame_decryptor.cc",
+ "buffered_frame_decryptor.h",
+ "call_stats2.cc",
+ "call_stats2.h",
+ "encoder_rtcp_feedback.cc",
+ "encoder_rtcp_feedback.h",
+ "quality_limitation_reason_tracker.cc",
+ "quality_limitation_reason_tracker.h",
+ "quality_threshold.cc",
+ "quality_threshold.h",
+ "receive_statistics_proxy2.cc",
+ "receive_statistics_proxy2.h",
+ "report_block_stats.cc",
+ "report_block_stats.h",
+ "rtp_streams_synchronizer2.cc",
+ "rtp_streams_synchronizer2.h",
+ "rtp_video_stream_receiver2.cc",
+ "rtp_video_stream_receiver2.h",
+ "rtp_video_stream_receiver_frame_transformer_delegate.cc",
+ "rtp_video_stream_receiver_frame_transformer_delegate.h",
+ "send_delay_stats.cc",
+ "send_delay_stats.h",
+ "send_statistics_proxy.cc",
+ "send_statistics_proxy.h",
+ "stats_counter.cc",
+ "stats_counter.h",
+ "stream_synchronization.cc",
+ "stream_synchronization.h",
+ "transport_adapter.cc",
+ "transport_adapter.h",
+ "video_quality_observer2.cc",
+ "video_quality_observer2.h",
+ "video_receive_stream2.cc",
+ "video_receive_stream2.h",
+ "video_send_stream.cc",
+ "video_send_stream.h",
+ "video_send_stream_impl.cc",
+ "video_send_stream_impl.h",
+ "video_stream_decoder2.cc",
+ "video_stream_decoder2.h",
+ ]
+
+ deps = [
+ ":frame_cadence_adapter",
+ ":frame_dumping_decoder",
+ ":task_queue_frame_decode_scheduler",
+ ":unique_timestamp_counter",
+ ":video_stream_buffer_controller",
+ ":video_stream_encoder_impl",
+ ":video_stream_encoder_interface",
+ "../api:array_view",
+ "../api:fec_controller_api",
+ "../api:field_trials_view",
+ "../api:frame_transformer_interface",
+ "../api:rtp_parameters",
+ "../api:scoped_refptr",
+ "../api:sequence_checker",
+ "../api:transport_api",
+ "../api/crypto:frame_decryptor_interface",
+ "../api/crypto:options",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport:field_trial_based_config",
+ "../api/units:data_rate",
+ "../api/units:frequency",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:encoded_image",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_bitrate_allocator",
+ "../api/video:video_codec_constants",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video:video_stream_encoder",
+ "../api/video_codecs:video_codecs_api",
+ "../call:bitrate_allocator",
+ "../call:call_interfaces",
+ "../call:rtp_interfaces",
+ "../call:rtp_receiver",
+ "../call:rtp_sender",
+ "../call:video_stream_api",
+ "../common_video",
+ "../media:media_constants",
+ "../modules:module_api",
+ "../modules:module_api_public",
+ "../modules/pacing",
+ "../modules/remote_bitrate_estimator",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/rtp_rtcp:rtp_video_header",
+ "../modules/utility:utility",
+ "../modules/video_coding",
+ "../modules/video_coding:nack_requester",
+ "../modules/video_coding:packet_buffer",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_vp9_helpers",
+ "../modules/video_coding/timing:timing_module",
+ "../rtc_base:checks",
+ "../rtc_base:event_tracer",
+ "../rtc_base:histogram_percentile_counter",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:mod_ops",
+ "../rtc_base:moving_max_counter",
+ "../rtc_base:platform_thread",
+ "../rtc_base:rate_statistics",
+ "../rtc_base:rate_tracker",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_numerics",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:safe_conversions",
+ "../rtc_base:sample_counter",
+ "../rtc_base:stringutils",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/experiments:alr_experiment",
+ "../rtc_base/experiments:field_trial_parser",
+ "../rtc_base/experiments:keyframe_interval_settings_experiment",
+ "../rtc_base/experiments:min_video_bitrate_experiment",
+ "../rtc_base/experiments:normalize_simulcast_size_experiment",
+ "../rtc_base/experiments:rate_control_settings",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ "../system_wrappers:metrics",
+ "../video/config:encoder_config",
+ "adaptation:video_adaptation",
+ "render:incoming_video_stream",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ if (!build_with_mozilla) {
+ deps += [ "../media:rtc_media_base" ]
+ }
+}
+
+rtc_library("video_stream_decoder_impl") {
+ visibility = [ "*" ]
+
+ sources = [
+ "video_stream_decoder_impl.cc",
+ "video_stream_decoder_impl.h",
+ ]
+
+ deps = [
+ "../api:field_trials_view",
+ "../api:sequence_checker",
+ "../api/task_queue",
+ "../api/transport:field_trial_based_config",
+ "../api/video:encoded_frame",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video:video_stream_decoder",
+ "../api/video_codecs:video_codecs_api",
+ "../modules/video_coding",
+ "../modules/video_coding:frame_buffer2",
+ "../modules/video_coding/timing:timing_module",
+ "../rtc_base:logging",
+ "../rtc_base:mod_ops",
+ "../rtc_base:platform_thread",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:timeutils",
+ "../rtc_base/memory:always_valid_pointer",
+ "../rtc_base/synchronization:mutex",
+ "../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("frame_dumping_decoder") {
+ visibility = [ "*" ]
+
+ sources = [
+ "frame_dumping_decoder.cc",
+ "frame_dumping_decoder.h",
+ ]
+
+ deps = [
+ "../api/video:encoded_frame",
+ "../api/video:encoded_image",
+ "../api/video_codecs:video_codecs_api",
+ "../modules/video_coding",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../rtc_base/system:file_wrapper",
+ ]
+}
+
+rtc_library("frame_cadence_adapter") {
+ visibility = [ "*" ]
+ sources = [
+ "frame_cadence_adapter.cc",
+ "frame_cadence_adapter.h",
+ ]
+
+ deps = [
+ "../api:field_trials_view",
+ "../api:sequence_checker",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:video_frame",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:race_checker",
+ "../rtc_base:rate_statistics",
+ "../rtc_base:timeutils",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ "../system_wrappers:metrics",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ ]
+}
+
+rtc_library("video_stream_buffer_controller") {
+ sources = [
+ "video_stream_buffer_controller.cc",
+ "video_stream_buffer_controller.h",
+ ]
+ deps = [
+ ":decode_synchronizer",
+ ":frame_decode_scheduler",
+ ":frame_decode_timing",
+ ":task_queue_frame_decode_scheduler",
+ ":video_receive_stream_timeout_tracker",
+ "../api:field_trials_view",
+ "../api:sequence_checker",
+ "../api/metronome",
+ "../api/task_queue",
+ "../api/units:data_size",
+ "../api/video:encoded_frame",
+ "../api/video:frame_buffer",
+ "../api/video:video_rtp_headers",
+ "../modules/video_coding",
+ "../modules/video_coding:frame_helpers",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding/timing:inter_frame_delay",
+ "../modules/video_coding/timing:jitter_estimator",
+ "../modules/video_coding/timing:timing_module",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base/experiments:rtt_mult_experiment",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/functional:bind_front",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("frame_decode_scheduler") {
+ sources = [ "frame_decode_scheduler.h" ]
+ deps = [
+ ":frame_decode_timing",
+ "../api/units:timestamp",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("task_queue_frame_decode_scheduler") {
+ sources = [
+ "task_queue_frame_decode_scheduler.cc",
+ "task_queue_frame_decode_scheduler.h",
+ ]
+ deps = [
+ ":frame_decode_scheduler",
+ ":frame_decode_timing",
+ "../api:sequence_checker",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/units:timestamp",
+ "../rtc_base:checks",
+ "../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("frame_decode_timing") {
+ sources = [
+ "frame_decode_timing.cc",
+ "frame_decode_timing.h",
+ ]
+ deps = [
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/units:time_delta",
+ "../modules/video_coding/timing:timing_module",
+ "../rtc_base:logging",
+ "../system_wrappers",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("video_receive_stream_timeout_tracker") {
+ sources = [
+ "video_receive_stream_timeout_tracker.cc",
+ "video_receive_stream_timeout_tracker.h",
+ ]
+ deps = [
+ "../api/task_queue",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../rtc_base:logging",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ ]
+}
+
+rtc_library("decode_synchronizer") {
+ sources = [
+ "decode_synchronizer.cc",
+ "decode_synchronizer.h",
+ ]
+ deps = [
+ ":frame_decode_scheduler",
+ ":frame_decode_timing",
+ "../api:sequence_checker",
+ "../api/metronome",
+ "../api/task_queue",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("video_stream_encoder_impl") {
+ visibility = [ "*" ]
+
+ # visibility = [ "../api/video:video_stream_encoder_create" ]
+ sources = [
+ "alignment_adjuster.cc",
+ "alignment_adjuster.h",
+ "encoder_bitrate_adjuster.cc",
+ "encoder_bitrate_adjuster.h",
+ "encoder_overshoot_detector.cc",
+ "encoder_overshoot_detector.h",
+ "frame_encode_metadata_writer.cc",
+ "frame_encode_metadata_writer.h",
+ "video_source_sink_controller.cc",
+ "video_source_sink_controller.h",
+ "video_stream_encoder.cc",
+ "video_stream_encoder.h",
+ ]
+
+ deps = [
+ ":frame_cadence_adapter",
+ ":video_stream_encoder_interface",
+ "../api:field_trials_view",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_setparameters_callback",
+ "../api:sequence_checker",
+ "../api/adaptation:resource_adaptation_api",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/task_queue:task_queue",
+ "../api/units:data_rate",
+ "../api/video:encoded_image",
+ "../api/video:render_resolution",
+ "../api/video:video_adaptation",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_bitrate_allocator",
+ "../api/video:video_bitrate_allocator_factory",
+ "../api/video:video_codec_constants",
+ "../api/video:video_frame",
+ "../api/video:video_layers_allocation",
+ "../api/video:video_rtp_headers",
+ "../api/video:video_stream_encoder",
+ "../api/video_codecs:video_codecs_api",
+ "../call/adaptation:resource_adaptation",
+ "../common_video",
+ "../media:media_channel",
+ "../media:rtc_media_base",
+ "../modules:module_api_public",
+ "../modules/video_coding",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_vp9_helpers",
+ "../modules/video_coding/svc:scalability_structures",
+ "../modules/video_coding/svc:svc_rate_allocator",
+ "../rtc_base:checks",
+ "../rtc_base:criticalsection",
+ "../rtc_base:event_tracer",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:race_checker",
+ "../rtc_base:rate_statistics",
+ "../rtc_base:refcount",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_numerics",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:safe_conversions",
+ "../rtc_base:stringutils",
+ "../rtc_base:timeutils",
+ "../rtc_base/experiments:alr_experiment",
+ "../rtc_base/experiments:balanced_degradation_settings",
+ "../rtc_base/experiments:encoder_info_settings",
+ "../rtc_base/experiments:field_trial_parser",
+ "../rtc_base/experiments:quality_rampup_experiment",
+ "../rtc_base/experiments:quality_scaler_settings",
+ "../rtc_base/experiments:quality_scaling_experiment",
+ "../rtc_base/experiments:rate_control_settings",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ "../system_wrappers:metrics",
+ "adaptation:video_adaptation",
+ "config:encoder_config",
+ "config:streams_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/cleanup",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("unique_timestamp_counter") {
+ sources = [
+ "unique_timestamp_counter.cc",
+ "unique_timestamp_counter.h",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_mocks") {
+ testonly = true
+ sources = [ "test/mock_video_stream_encoder.h" ]
+ deps = [
+ ":video_stream_encoder_interface",
+ "../api/video:video_stream_encoder",
+ "../test:test_support",
+ ]
+ }
+ if (!build_with_chromium) {
+ rtc_library("video_quality_test") {
+ testonly = true
+
+ # Only targets in this file and api/ can depend on this.
+ visibility = [
+ ":*",
+ "../api:create_video_quality_test_fixture_api",
+ ]
+ sources = [
+ "video_analyzer.cc",
+ "video_analyzer.h",
+ "video_quality_test.cc",
+ "video_quality_test.h",
+ ]
+ deps = [
+ ":frame_dumping_decoder",
+ "../api:create_frame_generator",
+ "../api:fec_controller_api",
+ "../api:frame_generator_api",
+ "../api:libjingle_peerconnection_api",
+ "../api:rtc_event_log_output_file",
+ "../api:test_dependency_factory",
+ "../api:video_quality_test_fixture_api",
+ "../api/numerics",
+ "../api/rtc_event_log:rtc_event_log_factory",
+ "../api/task_queue",
+ "../api/task_queue:default_task_queue_factory",
+ "../api/test/metrics:global_metrics_logger_and_exporter",
+ "../api/test/metrics:metric",
+ "../api/video:builtin_video_bitrate_allocator_factory",
+ "../api/video:video_bitrate_allocator_factory",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:video_codecs_api",
+ "../call:fake_network",
+ "../call:simulated_network",
+ "../common_video",
+ "../media:media_constants",
+ "../media:rtc_audio_video",
+ "../media:rtc_encoder_simulcast_proxy",
+ "../media:rtc_internal_video_codecs",
+ "../media:rtc_media_base",
+ "../modules/audio_device:audio_device_api",
+ "../modules/audio_device:audio_device_module_from_input_and_output",
+ "../modules/audio_device:windows_core_audio_utility",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/video_coding",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_h264",
+ "../modules/video_coding:webrtc_multiplex",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_coding:webrtc_vp9",
+ "../rtc_base:macromagic",
+ "../rtc_base:platform_thread",
+ "../rtc_base:rtc_base_tests_utils",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_numerics",
+ "../rtc_base:stringutils",
+ "../rtc_base:task_queue_for_test",
+ "../rtc_base:timeutils",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ "../test:fake_video_codecs",
+ "../test:fileutils",
+ "../test:platform_video_capturer",
+ "../test:rtp_test_utils",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ "../test:test_support_test_artifacts",
+ "../test:video_test_common",
+ "../test:video_test_support",
+ "config:streams_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/strings",
+ ]
+
+ if (is_mac || is_ios) {
+ deps += [ "../test:video_test_mac" ]
+ }
+ if (is_win) {
+ deps += [ "../rtc_base/win:scoped_com_initializer" ]
+ }
+ }
+
+ rtc_library("video_full_stack_tests") {
+ testonly = true
+
+ sources = [ "full_stack_tests.cc" ]
+ deps = [
+ ":video_quality_test",
+ "../api:simulated_network_api",
+ "../api:test_dependency_factory",
+ "../api:video_quality_test_fixture_api",
+ "../api/video_codecs:video_codecs_api",
+ "../modules/pacing",
+ "../modules/video_coding:webrtc_vp9",
+ "../rtc_base/experiments:alr_experiment",
+ "../system_wrappers:field_trial",
+ "../test:field_trial",
+ "../test:fileutils",
+ "../test:test_common",
+ "../test:test_support",
+ "../video/config:encoder_config",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ rtc_library("video_pc_full_stack_tests") {
+ testonly = true
+
+ sources = [ "pc_full_stack_tests.cc" ]
+ deps = [
+ "../api:create_network_emulation_manager",
+ "../api:create_peer_connection_quality_test_frame_generator",
+ "../api:create_peerconnection_quality_test_fixture",
+ "../api:frame_generator_api",
+ "../api:media_stream_interface",
+ "../api:network_emulation_manager_api",
+ "../api:peer_connection_quality_test_fixture_api",
+ "../api:simulated_network_api",
+ "../api:time_controller",
+ "../api/test/metrics:global_metrics_logger_and_exporter",
+ "../api/test/pclf:media_configuration",
+ "../api/test/pclf:media_quality_test_params",
+ "../api/test/pclf:peer_configurer",
+ "../api/video_codecs:video_codecs_api",
+ "../call:simulated_network",
+ "../modules/video_coding:webrtc_vp9",
+ "../system_wrappers:field_trial",
+ "../test:field_trial",
+ "../test:fileutils",
+ "../test:test_support",
+ "../test/pc/e2e:network_quality_metrics_reporter",
+ ]
+ }
+
+ rtc_library("video_loopback_lib") {
+ testonly = true
+ sources = [
+ "video_loopback.cc",
+ "video_loopback.h",
+ ]
+ deps = [
+ ":video_quality_test",
+ "../api:libjingle_peerconnection_api",
+ "../api:simulated_network_api",
+ "../api:video_quality_test_fixture_api",
+ "../api/transport:bitrate_settings",
+ "../api/video_codecs:video_codecs_api",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../system_wrappers:field_trial",
+ "../test:field_trial",
+ "../test:run_test",
+ "../test:run_test_interface",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ if (is_mac) {
+ mac_app_bundle("video_loopback") {
+ testonly = true
+ sources = [ "video_loopback_main.mm" ]
+ info_plist = "../test/mac/Info.plist"
+ deps = [ ":video_loopback_lib" ]
+ }
+ } else {
+ rtc_executable("video_loopback") {
+ testonly = true
+ sources = [ "video_loopback_main.cc" ]
+ deps = [ ":video_loopback_lib" ]
+ }
+ }
+
+ rtc_executable("screenshare_loopback") {
+ testonly = true
+ sources = [ "screenshare_loopback.cc" ]
+
+ deps = [
+ ":video_quality_test",
+ "../api:libjingle_peerconnection_api",
+ "../api:simulated_network_api",
+ "../api:video_quality_test_fixture_api",
+ "../api/transport:bitrate_settings",
+ "../api/video_codecs:video_codecs_api",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:stringutils",
+ "../system_wrappers:field_trial",
+ "../test:field_trial",
+ "../test:run_test",
+ "../test:run_test_interface",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+
+ rtc_executable("sv_loopback") {
+ testonly = true
+ sources = [ "sv_loopback.cc" ]
+ deps = [
+ ":video_quality_test",
+ "../api:libjingle_peerconnection_api",
+ "../api:simulated_network_api",
+ "../api:video_quality_test_fixture_api",
+ "../api/transport:bitrate_settings",
+ "../api/video_codecs:video_codecs_api",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:stringutils",
+ "../system_wrappers:field_trial",
+ "../test:field_trial",
+ "../test:run_test",
+ "../test:run_test_interface",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+ }
+
+ # TODO(pbos): Rename test suite.
+ rtc_library("video_tests") {
+ testonly = true
+
+ defines = []
+ sources = [
+ "alignment_adjuster_unittest.cc",
+ "buffered_frame_decryptor_unittest.cc",
+ "call_stats2_unittest.cc",
+ "cpu_scaling_tests.cc",
+ "decode_synchronizer_unittest.cc",
+ "encoder_bitrate_adjuster_unittest.cc",
+ "encoder_overshoot_detector_unittest.cc",
+ "encoder_rtcp_feedback_unittest.cc",
+ "end_to_end_tests/bandwidth_tests.cc",
+ "end_to_end_tests/call_operation_tests.cc",
+ "end_to_end_tests/codec_tests.cc",
+ "end_to_end_tests/config_tests.cc",
+ "end_to_end_tests/extended_reports_tests.cc",
+ "end_to_end_tests/fec_tests.cc",
+ "end_to_end_tests/frame_encryption_tests.cc",
+ "end_to_end_tests/histogram_tests.cc",
+ "end_to_end_tests/multi_codec_receive_tests.cc",
+ "end_to_end_tests/multi_stream_tester.cc",
+ "end_to_end_tests/multi_stream_tester.h",
+ "end_to_end_tests/multi_stream_tests.cc",
+ "end_to_end_tests/network_state_tests.cc",
+ "end_to_end_tests/resolution_bitrate_limits_tests.cc",
+ "end_to_end_tests/retransmission_tests.cc",
+ "end_to_end_tests/rtp_rtcp_tests.cc",
+ "end_to_end_tests/ssrc_tests.cc",
+ "end_to_end_tests/stats_tests.cc",
+ "end_to_end_tests/transport_feedback_tests.cc",
+ "frame_cadence_adapter_unittest.cc",
+ "frame_decode_timing_unittest.cc",
+ "frame_encode_metadata_writer_unittest.cc",
+ "picture_id_tests.cc",
+ "quality_limitation_reason_tracker_unittest.cc",
+ "quality_scaling_tests.cc",
+ "quality_threshold_unittest.cc",
+ "receive_statistics_proxy2_unittest.cc",
+ "report_block_stats_unittest.cc",
+ "rtp_video_stream_receiver2_unittest.cc",
+ "rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc",
+ "send_delay_stats_unittest.cc",
+ "send_statistics_proxy_unittest.cc",
+ "stats_counter_unittest.cc",
+ "stream_synchronization_unittest.cc",
+ "task_queue_frame_decode_scheduler_unittest.cc",
+ "unique_timestamp_counter_unittest.cc",
+ "video_receive_stream2_unittest.cc",
+ "video_receive_stream_timeout_tracker_unittest.cc",
+ "video_send_stream_impl_unittest.cc",
+ "video_send_stream_tests.cc",
+ "video_source_sink_controller_unittest.cc",
+ "video_stream_buffer_controller_unittest.cc",
+ "video_stream_decoder_impl_unittest.cc",
+ "video_stream_encoder_unittest.cc",
+ ]
+ deps = [
+ ":decode_synchronizer",
+ ":frame_cadence_adapter",
+ ":frame_decode_scheduler",
+ ":frame_decode_timing",
+ ":task_queue_frame_decode_scheduler",
+ ":unique_timestamp_counter",
+ ":video",
+ ":video_mocks",
+ ":video_receive_stream_timeout_tracker",
+ ":video_stream_buffer_controller",
+ ":video_stream_decoder_impl",
+ ":video_stream_encoder_impl",
+ ":video_stream_encoder_interface",
+ "../api:create_frame_generator",
+ "../api:fake_frame_decryptor",
+ "../api:fake_frame_encryptor",
+ "../api:field_trials_view",
+ "../api:frame_generator_api",
+ "../api:libjingle_peerconnection_api",
+ "../api:mock_fec_controller_override",
+ "../api:mock_frame_decryptor",
+ "../api:mock_video_codec_factory",
+ "../api:mock_video_decoder",
+ "../api:mock_video_encoder",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:scoped_refptr",
+ "../api:sequence_checker",
+ "../api:simulated_network_api",
+ "../api:time_controller",
+ "../api:transport_api",
+ "../api/adaptation:resource_adaptation_api",
+ "../api/crypto:options",
+ "../api/metronome/test:fake_metronome",
+ "../api/rtc_event_log",
+ "../api/task_queue",
+ "../api/task_queue:default_task_queue_factory",
+ "../api/test/metrics:global_metrics_logger_and_exporter",
+ "../api/test/metrics:metric",
+ "../api/test/video:function_video_factory",
+ "../api/transport:field_trial_based_config",
+ "../api/units:data_rate",
+ "../api/units:frequency",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:builtin_video_bitrate_allocator_factory",
+ "../api/video:encoded_image",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:video_adaptation",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_frame",
+ "../api/video:video_frame_type",
+ "../api/video:video_rtp_headers",
+ "../api/video/test:video_frame_matchers",
+ "../api/video_codecs:scalability_mode",
+ "../api/video_codecs:video_codecs_api",
+ "../api/video_codecs:vp8_temporal_layers_factory",
+ "../call:call_interfaces",
+ "../call:fake_network",
+ "../call:mock_bitrate_allocator",
+ "../call:mock_rtp_interfaces",
+ "../call:rtp_interfaces",
+ "../call:rtp_receiver",
+ "../call:rtp_sender",
+ "../call:simulated_network",
+ "../call:simulated_packet_receiver",
+ "../call:video_stream_api",
+ "../call/adaptation:resource_adaptation",
+ "../call/adaptation:resource_adaptation_test_utilities",
+ "../common_video",
+ "../common_video/test:utilities",
+ "../media:media_constants",
+ "../media:rtc_audio_video",
+ "../media:rtc_internal_video_codecs",
+ "../media:rtc_media",
+ "../media:rtc_media_base",
+ "../media:rtc_media_tests_utils",
+ "../media:rtc_simulcast_encoder_adapter",
+ "../modules:module_api_public",
+ "../modules/pacing",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:mock_rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/utility:utility",
+ "../modules/video_coding",
+ "../modules/video_coding:codec_globals_headers",
+ "../modules/video_coding:encoded_frame",
+ "../modules/video_coding:packet_buffer",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_h264",
+ "../modules/video_coding:webrtc_multiplex",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_coding:webrtc_vp9",
+ "../modules/video_coding:webrtc_vp9_helpers",
+ "../modules/video_coding/codecs/av1:libaom_av1_encoder",
+ "../modules/video_coding/svc:scalability_mode_util",
+ "../modules/video_coding/svc:scalability_structures",
+ "../modules/video_coding/svc:scalable_video_controller",
+ "../modules/video_coding/timing:timing_module",
+ "../rtc_base:byte_buffer",
+ "../rtc_base:checks",
+ "../rtc_base:gunit_helpers",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:platform_thread",
+ "../rtc_base:rate_limiter",
+ "../rtc_base:rate_statistics",
+ "../rtc_base:refcount",
+ "../rtc_base:rtc_base_tests_utils",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_numerics",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:safe_conversions",
+ "../rtc_base:stringutils",
+ "../rtc_base:task_queue_for_test",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base:unique_id_generator",
+ "../rtc_base/containers:flat_map",
+ "../rtc_base/experiments:alr_experiment",
+ "../rtc_base/experiments:encoder_info_settings",
+ "../rtc_base/synchronization:mutex",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ "../system_wrappers:metrics",
+ "../test:direct_transport",
+ "../test:encoder_settings",
+ "../test:fake_encoded_frame",
+ "../test:fake_video_codecs",
+ "../test:field_trial",
+ "../test:fileutils",
+ "../test:frame_utils",
+ "../test:mock_frame_transformer",
+ "../test:mock_transport",
+ "../test:null_transport",
+ "../test:rtp_test_utils",
+ "../test:run_loop",
+ "../test:scoped_key_value_config",
+ "../test:test_common",
+ "../test:test_support",
+ "../test:video_test_common",
+ "../test/time_controller",
+ "adaptation:video_adaptation",
+ "config:encoder_config",
+ "config:streams_config",
+ "config:video_config_tests",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/functional:bind_front",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+ if (!build_with_mozilla) {
+ deps += [ "../media:rtc_media_base" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/video/DEPS b/third_party/libwebrtc/video/DEPS
new file mode 100644
index 0000000000..939db1bc27
--- /dev/null
+++ b/third_party/libwebrtc/video/DEPS
@@ -0,0 +1,17 @@
+include_rules = [
+ "+call",
+ "+common_video",
+ "+logging/rtc_event_log",
+ "+media/base",
+ "+media/engine",
+ "+modules/audio_device",
+ "+modules/audio_mixer",
+ "+modules/bitrate_controller",
+ "+modules/congestion_controller",
+ "+modules/pacing",
+ "+modules/remote_bitrate_estimator",
+ "+modules/rtp_rtcp",
+ "+modules/utility",
+ "+modules/video_coding",
+ "+system_wrappers",
+]
diff --git a/third_party/libwebrtc/video/OWNERS b/third_party/libwebrtc/video/OWNERS
new file mode 100644
index 0000000000..2206a59a18
--- /dev/null
+++ b/third_party/libwebrtc/video/OWNERS
@@ -0,0 +1,6 @@
+asapersson@webrtc.org
+ilnik@webrtc.org
+mflodman@webrtc.org
+philipel@webrtc.org
+sprang@webrtc.org
+stefan@webrtc.org
diff --git a/third_party/libwebrtc/video/adaptation/BUILD.gn b/third_party/libwebrtc/video/adaptation/BUILD.gn
new file mode 100644
index 0000000000..d206909853
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/BUILD.gn
@@ -0,0 +1,125 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("video_adaptation") {
+ sources = [
+ "balanced_constraint.cc",
+ "balanced_constraint.h",
+ "bandwidth_quality_scaler_resource.cc",
+ "bandwidth_quality_scaler_resource.h",
+ "bitrate_constraint.cc",
+ "bitrate_constraint.h",
+ "encode_usage_resource.cc",
+ "encode_usage_resource.h",
+ "overuse_frame_detector.cc",
+ "overuse_frame_detector.h",
+ "pixel_limit_resource.cc",
+ "pixel_limit_resource.h",
+ "quality_rampup_experiment_helper.cc",
+ "quality_rampup_experiment_helper.h",
+ "quality_scaler_resource.cc",
+ "quality_scaler_resource.h",
+ "video_stream_encoder_resource.cc",
+ "video_stream_encoder_resource.h",
+ "video_stream_encoder_resource_manager.cc",
+ "video_stream_encoder_resource_manager.h",
+ ]
+
+ deps = [
+ "../../api:field_trials_view",
+ "../../api:rtp_parameters",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/adaptation:resource_adaptation_api",
+ "../../api/task_queue:task_queue",
+ "../../api/units:data_rate",
+ "../../api/units:time_delta",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_frame",
+ "../../api/video:video_stream_encoder",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call/adaptation:resource_adaptation",
+ "../../modules/video_coding:video_coding_utility",
+ "../../modules/video_coding/svc:scalability_mode_util",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:balanced_degradation_settings",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:quality_rampup_experiment",
+ "../../rtc_base/experiments:quality_scaler_settings",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:system_wrappers",
+ "../../video:video_stream_encoder_interface",
+ "../../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_adaptation_tests") {
+ testonly = true
+
+ defines = []
+ sources = [
+ "bitrate_constraint_unittest.cc",
+ "overuse_frame_detector_unittest.cc",
+ "pixel_limit_resource_unittest.cc",
+ "quality_scaler_resource_unittest.cc",
+ ]
+ deps = [
+ ":video_adaptation",
+ "../../api:field_trials_view",
+ "../../api:scoped_refptr",
+ "../../api/task_queue:task_queue",
+ "../../api/units:time_delta",
+ "../../api/units:timestamp",
+ "../../api/video:encoded_image",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_frame",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call/adaptation:resource_adaptation",
+ "../../call/adaptation:resource_adaptation_test_utilities",
+ "../../modules/video_coding:video_coding_utility",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:random",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base:threading",
+ "../../test:field_trial",
+ "../../test:rtc_expect_death",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ "../../test/time_controller:time_controller",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/video/adaptation/OWNERS b/third_party/libwebrtc/video/adaptation/OWNERS
new file mode 100644
index 0000000000..bd56595d2e
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/OWNERS
@@ -0,0 +1,3 @@
+eshr@webrtc.org
+hbos@webrtc.org
+ilnik@webrtc.org
diff --git a/third_party/libwebrtc/video/adaptation/balanced_constraint.cc b/third_party/libwebrtc/video/adaptation/balanced_constraint.cc
new file mode 100644
index 0000000000..f9ee08ac87
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/balanced_constraint.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/balanced_constraint.h"
+
+#include <string>
+#include <utility>
+
+#include "api/sequence_checker.h"
+
+namespace webrtc {
+
+BalancedConstraint::BalancedConstraint(
+ DegradationPreferenceProvider* degradation_preference_provider,
+ const FieldTrialsView& field_trials)
+ : encoder_target_bitrate_bps_(absl::nullopt),
+ balanced_settings_(field_trials),
+ degradation_preference_provider_(degradation_preference_provider) {
+ RTC_DCHECK(degradation_preference_provider_);
+ sequence_checker_.Detach();
+}
+
+void BalancedConstraint::OnEncoderTargetBitrateUpdated(
+ absl::optional<uint32_t> encoder_target_bitrate_bps) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ encoder_target_bitrate_bps_ = std::move(encoder_target_bitrate_bps);
+}
+
+bool BalancedConstraint::IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ // Don't adapt if BalancedDegradationSettings applies and determines this will
+ // exceed bitrate constraints.
+ if (degradation_preference_provider_->degradation_preference() ==
+ DegradationPreference::BALANCED) {
+ int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
+ input_state.frame_size_pixels().value());
+ if (!balanced_settings_.CanAdaptUp(
+ input_state.video_codec_type(), frame_size_pixels,
+ encoder_target_bitrate_bps_.value_or(0))) {
+ return false;
+ }
+ if (DidIncreaseResolution(restrictions_before, restrictions_after) &&
+ !balanced_settings_.CanAdaptUpResolution(
+ input_state.video_codec_type(), frame_size_pixels,
+ encoder_target_bitrate_bps_.value_or(0))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/balanced_constraint.h b/third_party/libwebrtc/video/adaptation/balanced_constraint.h
new file mode 100644
index 0000000000..22c7d2923c
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/balanced_constraint.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_
+#define VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_
+
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/degradation_preference_provider.h"
+#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class BalancedConstraint : public AdaptationConstraint {
+ public:
+ BalancedConstraint(
+ DegradationPreferenceProvider* degradation_preference_provider,
+ const FieldTrialsView& field_trials);
+ ~BalancedConstraint() override = default;
+
+ void OnEncoderTargetBitrateUpdated(
+ absl::optional<uint32_t> encoder_target_bitrate_bps);
+
+ // AdaptationConstraint implementation.
+ std::string Name() const override { return "BalancedConstraint"; }
+ bool IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const override;
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+ absl::optional<uint32_t> encoder_target_bitrate_bps_
+ RTC_GUARDED_BY(&sequence_checker_);
+ const BalancedDegradationSettings balanced_settings_;
+ const DegradationPreferenceProvider* degradation_preference_provider_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_
diff --git a/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.cc b/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.cc
new file mode 100644
index 0000000000..485019f309
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/bandwidth_quality_scaler_resource.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+// static
+rtc::scoped_refptr<BandwidthQualityScalerResource>
+BandwidthQualityScalerResource::Create() {
+ return rtc::make_ref_counted<BandwidthQualityScalerResource>();
+}
+
+BandwidthQualityScalerResource::BandwidthQualityScalerResource()
+ : VideoStreamEncoderResource("BandwidthQualityScalerResource"),
+ bandwidth_quality_scaler_(nullptr) {}
+
+BandwidthQualityScalerResource::~BandwidthQualityScalerResource() {
+ RTC_DCHECK(!bandwidth_quality_scaler_);
+}
+
+bool BandwidthQualityScalerResource::is_started() const {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ return bandwidth_quality_scaler_.get();
+}
+
+void BandwidthQualityScalerResource::StartCheckForOveruse(
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(!is_started());
+ bandwidth_quality_scaler_ = std::make_unique<BandwidthQualityScaler>(this);
+
+  // If more than one configuration parameter is ever needed here, a
+  // BandwidthQualityScaler::Initialize() function should be declared and
+  // called instead.
+ bandwidth_quality_scaler_->SetResolutionBitrateLimits(
+ resolution_bitrate_limits);
+}
+
+void BandwidthQualityScalerResource::StopCheckForOveruse() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(is_started());
+ // Ensure we have no pending callbacks. This makes it safe to destroy the
+ // BandwidthQualityScaler and even task queues with tasks in-flight.
+ bandwidth_quality_scaler_.reset();
+}
+
+void BandwidthQualityScalerResource::OnReportUsageBandwidthHigh() {
+ OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
+}
+
+void BandwidthQualityScalerResource::OnReportUsageBandwidthLow() {
+ OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
+}
+
+void BandwidthQualityScalerResource::OnEncodeCompleted(
+ const EncodedImage& encoded_image,
+ int64_t time_sent_in_us,
+ int64_t encoded_image_size_bytes) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+
+ if (bandwidth_quality_scaler_) {
+ bandwidth_quality_scaler_->ReportEncodeInfo(
+ encoded_image_size_bytes, time_sent_in_us / 1000,
+ encoded_image._encodedWidth, encoded_image._encodedHeight);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.h b/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.h
new file mode 100644
index 0000000000..a57c9907a4
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_BANDWIDTH_QUALITY_SCALER_RESOURCE_H_
+#define VIDEO_ADAPTATION_BANDWIDTH_QUALITY_SCALER_RESOURCE_H_
+
+#include <memory>
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/degradation_preference_provider.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "modules/video_coding/utility/bandwidth_quality_scaler.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
+
+namespace webrtc {
+
+// Handles interaction with the BandwidthQualityScaler.
+class BandwidthQualityScalerResource
+ : public VideoStreamEncoderResource,
+ public BandwidthQualityScalerUsageHandlerInterface {
+ public:
+ static rtc::scoped_refptr<BandwidthQualityScalerResource> Create();
+
+ BandwidthQualityScalerResource();
+ ~BandwidthQualityScalerResource() override;
+
+ bool is_started() const;
+
+ void OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us,
+ int64_t encoded_image_size_bytes);
+
+ void StartCheckForOveruse(
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits);
+ void StopCheckForOveruse();
+
+ // BandwidthScalerQpUsageHandlerInterface implementation.
+ void OnReportUsageBandwidthHigh() override;
+ void OnReportUsageBandwidthLow() override;
+
+ private:
+ std::unique_ptr<BandwidthQualityScaler> bandwidth_quality_scaler_
+ RTC_GUARDED_BY(encoder_queue());
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_BANDWIDTH_QUALITY_SCALER_RESOURCE_H_
diff --git a/third_party/libwebrtc/video/adaptation/bitrate_constraint.cc b/third_party/libwebrtc/video/adaptation/bitrate_constraint.cc
new file mode 100644
index 0000000000..bc36723d48
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/bitrate_constraint.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/bitrate_constraint.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "video/adaptation/video_stream_encoder_resource_manager.h"
+
+namespace webrtc {
+
+BitrateConstraint::BitrateConstraint()
+ : encoder_settings_(absl::nullopt),
+ encoder_target_bitrate_bps_(absl::nullopt) {
+ sequence_checker_.Detach();
+}
+
+void BitrateConstraint::OnEncoderSettingsUpdated(
+ absl::optional<EncoderSettings> encoder_settings) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ encoder_settings_ = std::move(encoder_settings);
+}
+
+void BitrateConstraint::OnEncoderTargetBitrateUpdated(
+ absl::optional<uint32_t> encoder_target_bitrate_bps) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ encoder_target_bitrate_bps_ = std::move(encoder_target_bitrate_bps);
+}
+
+// Checks if resolution is allowed to adapt up based on the current bitrate and
+// ResolutionBitrateLimits.min_start_bitrate_bps for the next higher resolution.
+// Bitrate limits usage is restricted to a single active stream/layer (e.g. when
+// quality scaling is enabled).
+bool BitrateConstraint::IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ // Make sure bitrate limits are not violated.
+ if (DidIncreaseResolution(restrictions_before, restrictions_after)) {
+ if (!encoder_settings_.has_value()) {
+ return true;
+ }
+
+ uint32_t bitrate_bps = encoder_target_bitrate_bps_.value_or(0);
+ if (bitrate_bps == 0) {
+ return true;
+ }
+
+ if (VideoStreamEncoderResourceManager::IsSimulcastOrMultipleSpatialLayers(
+ encoder_settings_->encoder_config())) {
+ // Resolution bitrate limits usage is restricted to singlecast.
+ return true;
+ }
+
+ absl::optional<int> current_frame_size_px =
+ input_state.single_active_stream_pixels();
+ if (!current_frame_size_px.has_value()) {
+ return true;
+ }
+
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> bitrate_limits =
+ encoder_settings_->encoder_info().GetEncoderBitrateLimitsForResolution(
+ // Need some sort of expected resulting pixels to be used
+ // instead of unrestricted.
+ GetHigherResolutionThan(*current_frame_size_px));
+
+ if (bitrate_limits.has_value()) {
+ RTC_DCHECK_GE(bitrate_limits->frame_size_pixels, *current_frame_size_px);
+ return bitrate_bps >=
+ static_cast<uint32_t>(bitrate_limits->min_start_bitrate_bps);
+ }
+ }
+ return true;
+}
+
+} // namespace webrtc
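
To make the min_start_bitrate_bps check above concrete, here is a small standalone sketch of the same decision with assumed numbers; LimitsFor() is a hypothetical stand-in for EncoderInfo's GetEncoderBitrateLimitsForResolution(). With a 900 kbps target and a 1 Mbps minimum start bitrate for 720p, the proposed step up from 360p is held back.

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    // Hypothetical mirror of VideoEncoder::ResolutionBitrateLimits.
    struct ResolutionBitrateLimits {
      int frame_size_pixels;
      int min_start_bitrate_bps;
    };

    // Stand-in for GetEncoderBitrateLimitsForResolution(): returns the entry
    // for the smallest listed resolution at least as large as target_pixels.
    std::optional<ResolutionBitrateLimits> LimitsFor(
        const std::vector<ResolutionBitrateLimits>& limits, int target_pixels) {
      for (const ResolutionBitrateLimits& l : limits) {  // assumed sorted
        if (l.frame_size_pixels >= target_pixels) return l;
      }
      return std::nullopt;
    }

    int main() {
      const std::vector<ResolutionBitrateLimits> limits = {
          {640 * 360, 500000}, {1280 * 720, 1000000}};
      const uint32_t target_bitrate_bps = 900000;  // current encoder target
      const int next_resolution_px = 1280 * 720;   // proposed step up from 360p

      std::optional<ResolutionBitrateLimits> l =
          LimitsFor(limits, next_resolution_px);
      const bool allowed =
          !l || target_bitrate_bps >=
                    static_cast<uint32_t>(l->min_start_bitrate_bps);
      std::cout << (allowed ? "adapt up" : "hold") << "\n";  // prints "hold"
    }

As in the real constraint, the absence of a limits entry means the step up is allowed; only a known, unmet minimum start bitrate holds it back.
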
diff --git a/third_party/libwebrtc/video/adaptation/bitrate_constraint.h b/third_party/libwebrtc/video/adaptation/bitrate_constraint.h
new file mode 100644
index 0000000000..a608e5db5d
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/bitrate_constraint.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_
+#define VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_
+
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class BitrateConstraint : public AdaptationConstraint {
+ public:
+ BitrateConstraint();
+ ~BitrateConstraint() override = default;
+
+ void OnEncoderSettingsUpdated(
+ absl::optional<EncoderSettings> encoder_settings);
+ void OnEncoderTargetBitrateUpdated(
+ absl::optional<uint32_t> encoder_target_bitrate_bps);
+
+ // AdaptationConstraint implementation.
+ std::string Name() const override { return "BitrateConstraint"; }
+ bool IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const override;
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+ absl::optional<EncoderSettings> encoder_settings_
+ RTC_GUARDED_BY(&sequence_checker_);
+ absl::optional<uint32_t> encoder_target_bitrate_bps_
+ RTC_GUARDED_BY(&sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_
diff --git a/third_party/libwebrtc/video/adaptation/bitrate_constraint_unittest.cc b/third_party/libwebrtc/video/adaptation/bitrate_constraint_unittest.cc
new file mode 100644
index 0000000000..f9cb87e3c1
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/bitrate_constraint_unittest.cc
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2021 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/bitrate_constraint.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/test/fake_frame_rate_provider.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+const VideoSourceRestrictions k180p{/*max_pixels_per_frame=*/320 * 180,
+ /*target_pixels_per_frame=*/320 * 180,
+ /*max_frame_rate=*/30};
+const VideoSourceRestrictions k360p{/*max_pixels_per_frame=*/640 * 360,
+ /*target_pixels_per_frame=*/640 * 360,
+ /*max_frame_rate=*/30};
+const VideoSourceRestrictions k720p{/*max_pixels_per_frame=*/1280 * 720,
+ /*target_pixels_per_frame=*/1280 * 720,
+ /*max_frame_rate=*/30};
+
+struct TestParams {
+ bool active;
+ absl::optional<ScalabilityMode> scalability_mode;
+};
+
+void FillCodecConfig(VideoCodec* video_codec,
+ VideoEncoderConfig* encoder_config,
+ int width_px,
+ int height_px,
+ const std::vector<TestParams>& params,
+ bool svc) {
+ size_t num_layers = params.size();
+ video_codec->codecType = kVideoCodecVP8;
+ video_codec->numberOfSimulcastStreams = num_layers;
+
+ encoder_config->number_of_streams = svc ? 1 : num_layers;
+ encoder_config->simulcast_layers.resize(num_layers);
+
+ for (size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
+ int layer_width_px = width_px >> (num_layers - 1 - layer_idx);
+ int layer_height_px = height_px >> (num_layers - 1 - layer_idx);
+
+ if (params[layer_idx].scalability_mode)
+ video_codec->SetScalabilityMode(*params[layer_idx].scalability_mode);
+ video_codec->simulcastStream[layer_idx].active = params[layer_idx].active;
+ video_codec->simulcastStream[layer_idx].width = layer_width_px;
+ video_codec->simulcastStream[layer_idx].height = layer_height_px;
+
+ encoder_config->simulcast_layers[layer_idx].scalability_mode =
+ params[layer_idx].scalability_mode;
+ encoder_config->simulcast_layers[layer_idx].active =
+ params[layer_idx].active;
+ encoder_config->simulcast_layers[layer_idx].width = layer_width_px;
+ encoder_config->simulcast_layers[layer_idx].height = layer_height_px;
+ }
+}
+
+constexpr int kStartBitrateBps360p = 500000;
+constexpr int kStartBitrateBps720p = 1000000;
+
+VideoEncoder::EncoderInfo MakeEncoderInfo() {
+ VideoEncoder::EncoderInfo encoder_info;
+ encoder_info.resolution_bitrate_limits = {
+ {640 * 360, kStartBitrateBps360p, 0, 5000000},
+ {1280 * 720, kStartBitrateBps720p, 0, 5000000},
+ {1920 * 1080, 2000000, 0, 5000000}};
+ return encoder_info;
+}
+
+} // namespace
+
+class BitrateConstraintTest : public ::testing::Test {
+ public:
+ BitrateConstraintTest()
+ : frame_rate_provider_(), input_state_provider_(&frame_rate_provider_) {}
+
+ protected:
+ void OnEncoderSettingsUpdated(int width_px,
+ int height_px,
+ const std::vector<TestParams>& params,
+ bool svc = false) {
+ VideoCodec video_codec;
+ VideoEncoderConfig encoder_config;
+ FillCodecConfig(&video_codec, &encoder_config, width_px, height_px, params,
+ svc);
+
+ EncoderSettings encoder_settings(MakeEncoderInfo(),
+ std::move(encoder_config), video_codec);
+ bitrate_constraint_.OnEncoderSettingsUpdated(encoder_settings);
+ input_state_provider_.OnEncoderSettingsChanged(encoder_settings);
+ }
+
+ FakeFrameRateProvider frame_rate_provider_;
+ VideoStreamInputStateProvider input_state_provider_;
+ BitrateConstraint bitrate_constraint_;
+};
+
+TEST_F(BitrateConstraintTest, AdaptUpAllowedAtSinglecastIfBitrateIsEnough) {
+ OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360,
+ {{.active = true}});
+
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpDisallowedAtSinglecastIfBitrateIsNotEnough) {
+ OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360,
+ {{.active = true}});
+
+ // 1 bps less than needed for 720p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1);
+
+ EXPECT_FALSE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpAllowedAtSinglecastIfBitrateIsEnoughForOneSpatialLayer) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T1}});
+
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpDisallowedAtSinglecastIfBitrateIsNotEnoughForOneSpatialLayer) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T1}});
+
+ // 1 bps less than needed for 720p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1);
+
+ EXPECT_FALSE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpAllowedAtSinglecastIfBitrateIsNotEnoughForMultipleSpatialLayers) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL2T1}});
+
+ // 1 bps less than needed for 720p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpAllowedAtSinglecastUpperLayerActiveIfBitrateIsEnough) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = false, .scalability_mode = ScalabilityMode::kL2T1},
+ {.active = true}});
+
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpDisallowedAtSinglecastUpperLayerActiveIfBitrateIsNotEnough) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = false, .scalability_mode = ScalabilityMode::kL2T1},
+ {.active = true}});
+
+ // 1 bps less than needed for 720p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1);
+
+ EXPECT_FALSE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest, AdaptUpAllowedLowestActiveIfBitrateIsNotEnough) {
+ OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360,
+ {{.active = true}, {.active = false}});
+
+ // 1 bps less than needed for 360p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps360p - 1);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k180p,
+ /*restrictions_after=*/k360p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpAllowedLowestActiveIfBitrateIsNotEnoughForOneSpatialLayer) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T2},
+ {.active = false}});
+
+ // 1 bps less than needed for 360p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps360p - 1);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k180p,
+ /*restrictions_after=*/k360p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpAllowedLowestActiveIfBitrateIsEnoughForOneSpatialLayerSvc) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T1},
+ {.active = false}},
+ /*svc=*/true);
+
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps360p);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k180p,
+ /*restrictions_after=*/k360p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpDisallowedLowestActiveIfBitrateIsNotEnoughForOneSpatialLayerSvc) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T1},
+ {.active = false}},
+ /*svc=*/true);
+
+ // 1 bps less than needed for 360p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps360p - 1);
+
+ EXPECT_FALSE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k180p,
+ /*restrictions_after=*/k360p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpAllowedLowestActiveIfBitrateIsNotEnoughForTwoSpatialLayersSvc) {
+ OnEncoderSettingsUpdated(
+ /*width_px=*/640, /*height_px=*/360,
+ {{.active = true, .scalability_mode = ScalabilityMode::kL2T1},
+ {.active = false}},
+ /*svc=*/true);
+
+ // 1 bps less than needed for 360p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps360p - 1);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k180p,
+ /*restrictions_after=*/k360p));
+}
+
+TEST_F(BitrateConstraintTest, AdaptUpAllowedAtSimulcastIfBitrateIsNotEnough) {
+ OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360,
+ {{.active = true}, {.active = true}});
+
+ // 1 bps less than needed for 720p.
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k720p));
+}
+
+TEST_F(BitrateConstraintTest,
+ AdaptUpInFpsAllowedAtNoResolutionIncreaseIfBitrateIsNotEnough) {
+ OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360,
+ {{.active = true}});
+
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(1);
+
+ EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed(
+ input_state_provider_.InputState(),
+ /*restrictions_before=*/k360p,
+ /*restrictions_after=*/k360p));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/encode_usage_resource.cc b/third_party/libwebrtc/video/adaptation/encode_usage_resource.cc
new file mode 100644
index 0000000000..4a97881b04
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/encode_usage_resource.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/encode_usage_resource.h"
+
+#include <limits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// static
+rtc::scoped_refptr<EncodeUsageResource> EncodeUsageResource::Create(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector) {
+ return rtc::make_ref_counted<EncodeUsageResource>(
+ std::move(overuse_detector));
+}
+
+EncodeUsageResource::EncodeUsageResource(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector)
+ : VideoStreamEncoderResource("EncoderUsageResource"),
+ overuse_detector_(std::move(overuse_detector)),
+ is_started_(false),
+ target_frame_rate_(absl::nullopt) {
+ RTC_DCHECK(overuse_detector_);
+}
+
+EncodeUsageResource::~EncodeUsageResource() {}
+
+bool EncodeUsageResource::is_started() const {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ return is_started_;
+}
+
+void EncodeUsageResource::StartCheckForOveruse(CpuOveruseOptions options) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(!is_started_);
+ overuse_detector_->StartCheckForOveruse(TaskQueueBase::Current(),
+ std::move(options), this);
+ is_started_ = true;
+ overuse_detector_->OnTargetFramerateUpdated(TargetFrameRateAsInt());
+}
+
+void EncodeUsageResource::StopCheckForOveruse() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ overuse_detector_->StopCheckForOveruse();
+ is_started_ = false;
+}
+
+void EncodeUsageResource::SetTargetFrameRate(
+ absl::optional<double> target_frame_rate) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ if (target_frame_rate == target_frame_rate_)
+ return;
+ target_frame_rate_ = target_frame_rate;
+ if (is_started_)
+ overuse_detector_->OnTargetFramerateUpdated(TargetFrameRateAsInt());
+}
+
+void EncodeUsageResource::OnEncodeStarted(const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ // TODO(hbos): Rename FrameCaptured() to something more appropriate (e.g.
+ // "OnEncodeStarted"?) or revise usage.
+ overuse_detector_->FrameCaptured(cropped_frame, time_when_first_seen_us);
+}
+
+void EncodeUsageResource::OnEncodeCompleted(
+ uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ // TODO(hbos): Rename FrameSent() to something more appropriate (e.g.
+ // "OnEncodeCompleted"?).
+ overuse_detector_->FrameSent(timestamp, time_sent_in_us, capture_time_us,
+ encode_duration_us);
+}
+
+void EncodeUsageResource::AdaptUp() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
+}
+
+void EncodeUsageResource::AdaptDown() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
+}
+
+int EncodeUsageResource::TargetFrameRateAsInt() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ return target_frame_rate_.has_value()
+ ? static_cast<int>(target_frame_rate_.value())
+ : std::numeric_limits<int>::max();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/encode_usage_resource.h b/third_party/libwebrtc/video/adaptation/encode_usage_resource.h
new file mode 100644
index 0000000000..c391132e57
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/encode_usage_resource.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
+#define VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_adaptation_reason.h"
+#include "video/adaptation/overuse_frame_detector.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
+
+namespace webrtc {
+
+// Handles interaction with the OveruseFrameDetector.
+// TODO(hbos): Add unittests specific to this class, it is currently only tested
+// indirectly by usage in the ResourceAdaptationProcessor (which is only tested
+// because of its usage in VideoStreamEncoder); all tests are currently in
+// video_stream_encoder_unittest.cc.
+class EncodeUsageResource : public VideoStreamEncoderResource,
+ public OveruseFrameDetectorObserverInterface {
+ public:
+ static rtc::scoped_refptr<EncodeUsageResource> Create(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector);
+
+ explicit EncodeUsageResource(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector);
+ ~EncodeUsageResource() override;
+
+ bool is_started() const;
+
+ void StartCheckForOveruse(CpuOveruseOptions options);
+ void StopCheckForOveruse();
+
+ void SetTargetFrameRate(absl::optional<double> target_frame_rate);
+ void OnEncodeStarted(const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us);
+ void OnEncodeCompleted(uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us);
+
+ // OveruseFrameDetectorObserverInterface implementation.
+ void AdaptUp() override;
+ void AdaptDown() override;
+
+ private:
+ int TargetFrameRateAsInt();
+
+ const std::unique_ptr<OveruseFrameDetector> overuse_detector_
+ RTC_GUARDED_BY(encoder_queue());
+ bool is_started_ RTC_GUARDED_BY(encoder_queue());
+ absl::optional<double> target_frame_rate_ RTC_GUARDED_BY(encoder_queue());
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
diff --git a/third_party/libwebrtc/video/adaptation/overuse_frame_detector.cc b/third_party/libwebrtc/video/adaptation/overuse_frame_detector.cc
new file mode 100644
index 0000000000..9836a466b5
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/overuse_frame_detector.cc
@@ -0,0 +1,722 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/overuse_frame_detector.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "api/video/video_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include <mach/mach.h>
+#endif // defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+
+namespace webrtc {
+
+namespace {
+const int64_t kCheckForOveruseIntervalMs = 5000;
+const int64_t kTimeToFirstCheckForOveruseMs = 100;
+
+// Delay between consecutive rampups. (Used for quick recovery.)
+const int kQuickRampUpDelayMs = 10 * 1000;
+// Delay between rampup attempts. Initially uses standard, scales up to max.
+const int kStandardRampUpDelayMs = 40 * 1000;
+const int kMaxRampUpDelayMs = 240 * 1000;
+// Exponential back-off factor, to prevent annoying up-down behaviour.
+const double kRampUpBackoffFactor = 2.0;
+
+// Max number of overuses detected before always applying the rampup delay.
+const int kMaxOverusesBeforeApplyRampupDelay = 4;
+
+// The maximum exponent to use in the ExpFilter.
+const float kMaxExp = 7.0f;
+// Default value used before first reconfiguration.
+const int kDefaultFrameRate = 30;
+// Default sample diff, default frame rate.
+const float kDefaultSampleDiffMs = 1000.0f / kDefaultFrameRate;
+// A factor applied to the sample diff on OnTargetFramerateUpdated to determine
+// a max limit for the sample diff. For instance, with a framerate of 30fps,
+// the sample diff is capped to (1000 / 30) * 1.35 = 45ms. This prevents
+// triggering too soon if there are individual very large outliers.
+const float kMaxSampleDiffMarginFactor = 1.35f;
+// Minimum framerate allowed for usage calculation. This prevents crazy long
+// encode times from being accepted if the frame rate happens to be low.
+const int kMinFramerate = 7;
+const int kMaxFramerate = 30;
+
+// Class for calculating the processing usage on the send-side (the average
+// processing time of a frame divided by the average time difference between
+// captured frames).
+class SendProcessingUsage1 : public OveruseFrameDetector::ProcessingUsage {
+ public:
+ explicit SendProcessingUsage1(const CpuOveruseOptions& options)
+ : kWeightFactorFrameDiff(0.998f),
+ kWeightFactorProcessing(0.995f),
+ kInitialSampleDiffMs(40.0f),
+ options_(options),
+ count_(0),
+ last_processed_capture_time_us_(-1),
+ max_sample_diff_ms_(kDefaultSampleDiffMs * kMaxSampleDiffMarginFactor),
+ filtered_processing_ms_(new rtc::ExpFilter(kWeightFactorProcessing)),
+ filtered_frame_diff_ms_(new rtc::ExpFilter(kWeightFactorFrameDiff)) {
+ Reset();
+ }
+ ~SendProcessingUsage1() override {}
+
+ void Reset() override {
+ frame_timing_.clear();
+ count_ = 0;
+ last_processed_capture_time_us_ = -1;
+ max_sample_diff_ms_ = kDefaultSampleDiffMs * kMaxSampleDiffMarginFactor;
+ filtered_frame_diff_ms_->Reset(kWeightFactorFrameDiff);
+ filtered_frame_diff_ms_->Apply(1.0f, kInitialSampleDiffMs);
+ filtered_processing_ms_->Reset(kWeightFactorProcessing);
+ filtered_processing_ms_->Apply(1.0f, InitialProcessingMs());
+ }
+
+ void SetMaxSampleDiffMs(float diff_ms) override {
+ max_sample_diff_ms_ = diff_ms;
+ }
+
+ void FrameCaptured(const VideoFrame& frame,
+ int64_t time_when_first_seen_us,
+ int64_t last_capture_time_us) override {
+ if (last_capture_time_us != -1)
+ AddCaptureSample(1e-3 * (time_when_first_seen_us - last_capture_time_us));
+
+ frame_timing_.push_back(FrameTiming(frame.timestamp_us(), frame.timestamp(),
+ time_when_first_seen_us));
+ }
+
+ absl::optional<int> FrameSent(
+ uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t /* capture_time_us */,
+ absl::optional<int> /* encode_duration_us */) override {
+ absl::optional<int> encode_duration_us;
+    // Delay before reporting the actual encoding time, so that the total
+    // encoding time can be detected when more than one layer is encoded.
+    // Encoding is assumed to finish within a second (or to produce enough
+    // long samples within that second to trigger an overuse even when it
+    // does not).
+ static const int64_t kEncodingTimeMeasureWindowMs = 1000;
+ for (auto& it : frame_timing_) {
+ if (it.timestamp == timestamp) {
+ it.last_send_us = time_sent_in_us;
+ break;
+ }
+ }
+    // TODO(pbos): Handle the case/log errors when not finding the
+    // corresponding frame (either very slow encoding or incorrect timestamps
+    // returned from the encoder).
+ // This is currently the case for all frames on ChromeOS, so logging them
+ // would be spammy, and triggering overuse would be wrong.
+ // https://crbug.com/350106
+ while (!frame_timing_.empty()) {
+ FrameTiming timing = frame_timing_.front();
+ if (time_sent_in_us - timing.capture_us <
+ kEncodingTimeMeasureWindowMs * rtc::kNumMicrosecsPerMillisec) {
+ break;
+ }
+ if (timing.last_send_us != -1) {
+ encode_duration_us.emplace(
+ static_cast<int>(timing.last_send_us - timing.capture_us));
+
+ if (last_processed_capture_time_us_ != -1) {
+ int64_t diff_us = timing.capture_us - last_processed_capture_time_us_;
+ AddSample(1e-3 * (*encode_duration_us), 1e-3 * diff_us);
+ }
+ last_processed_capture_time_us_ = timing.capture_us;
+ }
+ frame_timing_.pop_front();
+ }
+ return encode_duration_us;
+ }
+
+ int Value() override {
+ if (count_ < static_cast<uint32_t>(options_.min_frame_samples)) {
+ return static_cast<int>(InitialUsageInPercent() + 0.5f);
+ }
+ float frame_diff_ms = std::max(filtered_frame_diff_ms_->filtered(), 1.0f);
+ frame_diff_ms = std::min(frame_diff_ms, max_sample_diff_ms_);
+ float encode_usage_percent =
+ 100.0f * filtered_processing_ms_->filtered() / frame_diff_ms;
+ return static_cast<int>(encode_usage_percent + 0.5);
+ }
+
+ private:
+ struct FrameTiming {
+ FrameTiming(int64_t capture_time_us, uint32_t timestamp, int64_t now)
+ : capture_time_us(capture_time_us),
+ timestamp(timestamp),
+ capture_us(now),
+ last_send_us(-1) {}
+ int64_t capture_time_us;
+ uint32_t timestamp;
+ int64_t capture_us;
+ int64_t last_send_us;
+ };
+
+ void AddCaptureSample(float sample_ms) {
+ float exp = sample_ms / kDefaultSampleDiffMs;
+ exp = std::min(exp, kMaxExp);
+ filtered_frame_diff_ms_->Apply(exp, sample_ms);
+ }
+
+ void AddSample(float processing_ms, int64_t diff_last_sample_ms) {
+ ++count_;
+ float exp = diff_last_sample_ms / kDefaultSampleDiffMs;
+ exp = std::min(exp, kMaxExp);
+ filtered_processing_ms_->Apply(exp, processing_ms);
+ }
+
+ float InitialUsageInPercent() const {
+ // Start in between the underuse and overuse threshold.
+ return (options_.low_encode_usage_threshold_percent +
+ options_.high_encode_usage_threshold_percent) /
+ 2.0f;
+ }
+
+ float InitialProcessingMs() const {
+ return InitialUsageInPercent() * kInitialSampleDiffMs / 100;
+ }
+
+ const float kWeightFactorFrameDiff;
+ const float kWeightFactorProcessing;
+ const float kInitialSampleDiffMs;
+
+ const CpuOveruseOptions options_;
+ std::list<FrameTiming> frame_timing_;
+ uint64_t count_;
+ int64_t last_processed_capture_time_us_;
+ float max_sample_diff_ms_;
+ std::unique_ptr<rtc::ExpFilter> filtered_processing_ms_;
+ std::unique_ptr<rtc::ExpFilter> filtered_frame_diff_ms_;
+};
+
+// New cpu load estimator.
+// TODO(bugs.webrtc.org/8504): For some period of time, we need to
+// switch between the two versions of the estimator for experiments.
+// When problems are sorted out, the old estimator should be deleted.
+class SendProcessingUsage2 : public OveruseFrameDetector::ProcessingUsage {
+ public:
+ explicit SendProcessingUsage2(const CpuOveruseOptions& options)
+ : options_(options) {
+ Reset();
+ }
+ ~SendProcessingUsage2() override = default;
+
+ void Reset() override {
+ prev_time_us_ = -1;
+ // Start in between the underuse and overuse threshold.
+ load_estimate_ = (options_.low_encode_usage_threshold_percent +
+ options_.high_encode_usage_threshold_percent) /
+ 200.0;
+ }
+
+ void SetMaxSampleDiffMs(float /* diff_ms */) override {}
+
+ void FrameCaptured(const VideoFrame& frame,
+ int64_t time_when_first_seen_us,
+ int64_t last_capture_time_us) override {}
+
+ absl::optional<int> FrameSent(
+ uint32_t /* timestamp */,
+ int64_t /* time_sent_in_us */,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us) override {
+ if (encode_duration_us) {
+ int duration_per_frame_us =
+ DurationPerInputFrame(capture_time_us, *encode_duration_us);
+ if (prev_time_us_ != -1) {
+ if (capture_time_us < prev_time_us_) {
+ // The weighting in AddSample assumes that samples are processed with
+ // non-decreasing measurement timestamps. We could implement
+          // appropriate weights for samples arriving late, but since that is
+          // a rare case, keep things simple by just pushing those
+          // measurements a bit forward in time.
+ capture_time_us = prev_time_us_;
+ }
+ AddSample(1e-6 * duration_per_frame_us,
+ 1e-6 * (capture_time_us - prev_time_us_));
+ }
+ }
+ prev_time_us_ = capture_time_us;
+
+ return encode_duration_us;
+ }
+
+ private:
+ void AddSample(double encode_time, double diff_time) {
+ RTC_CHECK_GE(diff_time, 0.0);
+
+    // Use the filter update
+    //
+    //   load <-- (x/d) * (1 - exp(-d/tau)) + exp(-d/tau) * load
+    //
+    // where we must take care for small d, using the proper limit
+    //   (1 - exp(-d/tau)) / d = 1/tau - d/(2*tau^2) + O(d^2)
+ double tau = (1e-3 * options_.filter_time_ms);
+ double e = diff_time / tau;
+ double c;
+ if (e < 0.0001) {
+ c = (1 - e / 2) / tau;
+ } else {
+ c = -expm1(-e) / diff_time;
+ }
+ load_estimate_ = c * encode_time + exp(-e) * load_estimate_;
+ }
+
+ int64_t DurationPerInputFrame(int64_t capture_time_us,
+ int64_t encode_time_us) {
+ // Discard data on old frames; limit 2 seconds.
+ static constexpr int64_t kMaxAge = 2 * rtc::kNumMicrosecsPerSec;
+ for (auto it = max_encode_time_per_input_frame_.begin();
+ it != max_encode_time_per_input_frame_.end() &&
+ it->first < capture_time_us - kMaxAge;) {
+ it = max_encode_time_per_input_frame_.erase(it);
+ }
+
+ std::map<int64_t, int>::iterator it;
+ bool inserted;
+ std::tie(it, inserted) = max_encode_time_per_input_frame_.emplace(
+ capture_time_us, encode_time_us);
+ if (inserted) {
+ // First encoded frame for this input frame.
+ return encode_time_us;
+ }
+ if (encode_time_us <= it->second) {
+ // Shorter encode time than previous frame (unlikely). Count it as being
+ // done in parallel.
+ return 0;
+ }
+ // Record new maximum encode time, and return increase from previous max.
+ int increase = encode_time_us - it->second;
+ it->second = encode_time_us;
+ return increase;
+ }
+
+ int Value() override {
+ return static_cast<int>(100.0 * load_estimate_ + 0.5);
+ }
+
+ const CpuOveruseOptions options_;
+ // Indexed by the capture timestamp, used as frame id.
+ std::map<int64_t, int> max_encode_time_per_input_frame_;
+
+ int64_t prev_time_us_ = -1;
+ double load_estimate_;
+};
+
+// Class used for manual testing of overuse, enabled via field trial flag.
+class OverdoseInjector : public OveruseFrameDetector::ProcessingUsage {
+ public:
+ OverdoseInjector(std::unique_ptr<OveruseFrameDetector::ProcessingUsage> usage,
+ int64_t normal_period_ms,
+ int64_t overuse_period_ms,
+ int64_t underuse_period_ms)
+ : usage_(std::move(usage)),
+ normal_period_ms_(normal_period_ms),
+ overuse_period_ms_(overuse_period_ms),
+ underuse_period_ms_(underuse_period_ms),
+ state_(State::kNormal),
+ last_toggling_ms_(-1) {
+ RTC_DCHECK_GT(overuse_period_ms, 0);
+ RTC_DCHECK_GT(normal_period_ms, 0);
+ RTC_LOG(LS_INFO) << "Simulating overuse with intervals " << normal_period_ms
+ << "ms normal mode, " << overuse_period_ms
+ << "ms overuse mode.";
+ }
+
+ ~OverdoseInjector() override {}
+
+ void Reset() override { usage_->Reset(); }
+
+ void SetMaxSampleDiffMs(float diff_ms) override {
+ usage_->SetMaxSampleDiffMs(diff_ms);
+ }
+
+ void FrameCaptured(const VideoFrame& frame,
+ int64_t time_when_first_seen_us,
+ int64_t last_capture_time_us) override {
+ usage_->FrameCaptured(frame, time_when_first_seen_us, last_capture_time_us);
+ }
+
+ absl::optional<int> FrameSent(
+      // These two arguments are used by the old estimator.
+ uint32_t timestamp,
+ int64_t time_sent_in_us,
+ // And these two by the new estimator.
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us) override {
+ return usage_->FrameSent(timestamp, time_sent_in_us, capture_time_us,
+ encode_duration_us);
+ }
+
+ int Value() override {
+ int64_t now_ms = rtc::TimeMillis();
+ if (last_toggling_ms_ == -1) {
+ last_toggling_ms_ = now_ms;
+ } else {
+ switch (state_) {
+ case State::kNormal:
+ if (now_ms > last_toggling_ms_ + normal_period_ms_) {
+ state_ = State::kOveruse;
+ last_toggling_ms_ = now_ms;
+ RTC_LOG(LS_INFO) << "Simulating CPU overuse.";
+ }
+ break;
+ case State::kOveruse:
+ if (now_ms > last_toggling_ms_ + overuse_period_ms_) {
+ state_ = State::kUnderuse;
+ last_toggling_ms_ = now_ms;
+ RTC_LOG(LS_INFO) << "Simulating CPU underuse.";
+ }
+ break;
+ case State::kUnderuse:
+ if (now_ms > last_toggling_ms_ + underuse_period_ms_) {
+ state_ = State::kNormal;
+ last_toggling_ms_ = now_ms;
+ RTC_LOG(LS_INFO) << "Actual CPU overuse measurements in effect.";
+ }
+ break;
+ }
+ }
+
+    absl::optional<int> overridden_usage_value;
+ switch (state_) {
+ case State::kNormal:
+ break;
+ case State::kOveruse:
+        overridden_usage_value.emplace(250);
+ break;
+ case State::kUnderuse:
+        overridden_usage_value.emplace(5);
+ break;
+ }
+
+    return overridden_usage_value.value_or(usage_->Value());
+ }
+
+ private:
+ const std::unique_ptr<OveruseFrameDetector::ProcessingUsage> usage_;
+ const int64_t normal_period_ms_;
+ const int64_t overuse_period_ms_;
+ const int64_t underuse_period_ms_;
+ enum class State { kNormal, kOveruse, kUnderuse } state_;
+ int64_t last_toggling_ms_;
+};
+
+} // namespace
+
+CpuOveruseOptions::CpuOveruseOptions(const FieldTrialsView& field_trials)
+ : high_encode_usage_threshold_percent(85),
+ frame_timeout_interval_ms(1500),
+ min_frame_samples(120),
+ min_process_count(3),
+ high_threshold_consecutive_count(2),
+ // Disabled by default.
+ filter_time_ms(0) {
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ // Kill switch for re-enabling special adaptation rules for macOS.
+ // TODO(bugs.webrtc.org/14138): Remove once removal is deemed safe.
+ if (field_trials.IsEnabled(
+ "WebRTC-MacSpecialOveruseRulesRemovalKillSwitch")) {
+ // This is proof-of-concept code for letting the physical core count affect
+ // the interval into which we attempt to scale. For now, the code is Mac OS
+    // specific, since that's the platform where we saw most problems.
+ // TODO(torbjorng): Enhance SystemInfo to return this metric.
+
+ mach_port_t mach_host = mach_host_self();
+ host_basic_info hbi = {};
+ mach_msg_type_number_t info_count = HOST_BASIC_INFO_COUNT;
+ kern_return_t kr =
+ host_info(mach_host, HOST_BASIC_INFO,
+ reinterpret_cast<host_info_t>(&hbi), &info_count);
+ mach_port_deallocate(mach_task_self(), mach_host);
+
+ int n_physical_cores;
+ if (kr != KERN_SUCCESS) {
+ // If we couldn't get # of physical CPUs, don't panic. Assume we have 1.
+ n_physical_cores = 1;
+ RTC_LOG(LS_ERROR)
+ << "Failed to determine number of physical cores, assuming 1";
+ } else {
+ n_physical_cores = hbi.physical_cpu;
+ RTC_LOG(LS_INFO) << "Number of physical cores:" << n_physical_cores;
+ }
+
+ // Change init list default for few core systems. The assumption here is
+ // that encoding, which we measure here, takes about 1/4 of the processing
+ // of a two-way call. This is roughly true for x86 using both vp8 and vp9
+ // without hardware encoding. Since we don't affect the incoming stream
+ // here, we only control about 1/2 of the total processing needs, but this
+ // is not taken into account.
+ if (n_physical_cores == 1)
+ high_encode_usage_threshold_percent = 20; // Roughly 1/4 of 100%.
+ else if (n_physical_cores == 2)
+ high_encode_usage_threshold_percent = 40; // Roughly 1/4 of 200%.
+ }
+#endif // defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ // Note that we make the interval 2x+epsilon wide, since libyuv scaling steps
+ // are close to that (when squared). This wide interval makes sure that
+ // scaling up or down does not jump all the way across the interval.
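+  // With the default high threshold of 85%, this yields a low threshold of
+  // (85 - 1) / 2 = 42%.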
+ low_encode_usage_threshold_percent =
+ (high_encode_usage_threshold_percent - 1) / 2;
+}
+
+std::unique_ptr<OveruseFrameDetector::ProcessingUsage>
+OveruseFrameDetector::CreateProcessingUsage(const CpuOveruseOptions& options) {
+ std::unique_ptr<ProcessingUsage> instance;
+ if (options.filter_time_ms > 0) {
+ instance = std::make_unique<SendProcessingUsage2>(options);
+ } else {
+ instance = std::make_unique<SendProcessingUsage1>(options);
+ }
+ std::string toggling_interval =
+ field_trial::FindFullName("WebRTC-ForceSimulatedOveruseIntervalMs");
+ if (!toggling_interval.empty()) {
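+    // The field trial value encodes "<normal_ms>-<overuse_ms>-<underuse_ms>",
+    // e.g. "30000-10000-10000".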
+ int normal_period_ms = 0;
+ int overuse_period_ms = 0;
+ int underuse_period_ms = 0;
+ if (sscanf(toggling_interval.c_str(), "%d-%d-%d", &normal_period_ms,
+ &overuse_period_ms, &underuse_period_ms) == 3) {
+ if (normal_period_ms > 0 && overuse_period_ms > 0 &&
+ underuse_period_ms > 0) {
+ instance = std::make_unique<OverdoseInjector>(
+ std::move(instance), normal_period_ms, overuse_period_ms,
+ underuse_period_ms);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Invalid (non-positive) normal/overuse/underuse periods: "
+ << normal_period_ms << " / " << overuse_period_ms << " / "
+ << underuse_period_ms;
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Malformed toggling interval: "
+ << toggling_interval;
+ }
+ }
+ return instance;
+}
+
+OveruseFrameDetector::OveruseFrameDetector(
+ CpuOveruseMetricsObserver* metrics_observer,
+ const FieldTrialsView& field_trials)
+ : options_(field_trials),
+ metrics_observer_(metrics_observer),
+ num_process_times_(0),
+ // TODO(bugs.webrtc.org/9078): Use absl::optional
+ last_capture_time_us_(-1),
+ num_pixels_(0),
+ max_framerate_(kDefaultFrameRate),
+ last_overuse_time_ms_(-1),
+ checks_above_threshold_(0),
+ num_overuse_detections_(0),
+ last_rampup_time_ms_(-1),
+ in_quick_rampup_(false),
+ current_rampup_delay_ms_(kStandardRampUpDelayMs) {
+ task_checker_.Detach();
+ ParseFieldTrial({&filter_time_constant_},
+ field_trial::FindFullName("WebRTC-CpuLoadEstimator"));
+}
+
+OveruseFrameDetector::~OveruseFrameDetector() {}
+
+void OveruseFrameDetector::StartCheckForOveruse(
+ TaskQueueBase* task_queue_base,
+ const CpuOveruseOptions& options,
+ OveruseFrameDetectorObserverInterface* overuse_observer) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK(!check_overuse_task_.Running());
+ RTC_DCHECK(overuse_observer != nullptr);
+
+ SetOptions(options);
+ check_overuse_task_ = RepeatingTaskHandle::DelayedStart(
+ task_queue_base, TimeDelta::Millis(kTimeToFirstCheckForOveruseMs),
+ [this, overuse_observer] {
+ CheckForOveruse(overuse_observer);
+ return TimeDelta::Millis(kCheckForOveruseIntervalMs);
+ });
+}
+
+void OveruseFrameDetector::StopCheckForOveruse() {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ check_overuse_task_.Stop();
+}
+
+void OveruseFrameDetector::EncodedFrameTimeMeasured(int encode_duration_ms) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ encode_usage_percent_ = usage_->Value();
+
+ metrics_observer_->OnEncodedFrameTimeMeasured(encode_duration_ms,
+ *encode_usage_percent_);
+}
+
+bool OveruseFrameDetector::FrameSizeChanged(int num_pixels) const {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+  return num_pixels != num_pixels_;
+}
+
+bool OveruseFrameDetector::FrameTimeoutDetected(int64_t now_us) const {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ if (last_capture_time_us_ == -1)
+ return false;
+ return (now_us - last_capture_time_us_) >
+ options_.frame_timeout_interval_ms * rtc::kNumMicrosecsPerMillisec;
+}
+
+void OveruseFrameDetector::ResetAll(int num_pixels) {
+  // Reset state as a result of the resolution being changed. Do not, however,
+  // change the current frame rate back to the default.
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ num_pixels_ = num_pixels;
+ usage_->Reset();
+ last_capture_time_us_ = -1;
+ num_process_times_ = 0;
+ encode_usage_percent_ = absl::nullopt;
+ OnTargetFramerateUpdated(max_framerate_);
+}
+
+void OveruseFrameDetector::OnTargetFramerateUpdated(int framerate_fps) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK_GE(framerate_fps, 0);
+ max_framerate_ = std::min(kMaxFramerate, framerate_fps);
+ usage_->SetMaxSampleDiffMs((1000 / std::max(kMinFramerate, max_framerate_)) *
+ kMaxSampleDiffMarginFactor);
+}
+
+void OveruseFrameDetector::FrameCaptured(const VideoFrame& frame,
+ int64_t time_when_first_seen_us) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+
+ if (FrameSizeChanged(frame.width() * frame.height()) ||
+ FrameTimeoutDetected(time_when_first_seen_us)) {
+ ResetAll(frame.width() * frame.height());
+ }
+
+ usage_->FrameCaptured(frame, time_when_first_seen_us, last_capture_time_us_);
+ last_capture_time_us_ = time_when_first_seen_us;
+}
+
+void OveruseFrameDetector::FrameSent(uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ encode_duration_us = usage_->FrameSent(timestamp, time_sent_in_us,
+ capture_time_us, encode_duration_us);
+
+ if (encode_duration_us) {
+ EncodedFrameTimeMeasured(*encode_duration_us /
+ rtc::kNumMicrosecsPerMillisec);
+ }
+}
+
+void OveruseFrameDetector::CheckForOveruse(
+ OveruseFrameDetectorObserverInterface* observer) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ RTC_DCHECK(observer);
+ ++num_process_times_;
+ if (num_process_times_ <= options_.min_process_count ||
+ !encode_usage_percent_)
+ return;
+
+ int64_t now_ms = rtc::TimeMillis();
+
+ if (IsOverusing(*encode_usage_percent_)) {
+    // If the last thing we did was ramp up, and we now have to back down,
+    // check whether this peak was short. If so, back off to avoid toggling
+    // back and forth between loads the system doesn't seem to handle.
+ bool check_for_backoff = last_rampup_time_ms_ > last_overuse_time_ms_;
+ if (check_for_backoff) {
+ if (now_ms - last_rampup_time_ms_ < kStandardRampUpDelayMs ||
+ num_overuse_detections_ > kMaxOverusesBeforeApplyRampupDelay) {
+ // Going up was not ok for very long, back off.
+ current_rampup_delay_ms_ *= kRampUpBackoffFactor;
+ if (current_rampup_delay_ms_ > kMaxRampUpDelayMs)
+ current_rampup_delay_ms_ = kMaxRampUpDelayMs;
+ } else {
+ // Not currently backing off, reset rampup delay.
+ current_rampup_delay_ms_ = kStandardRampUpDelayMs;
+ }
+ }
+
+ last_overuse_time_ms_ = now_ms;
+ in_quick_rampup_ = false;
+ checks_above_threshold_ = 0;
+ ++num_overuse_detections_;
+
+ observer->AdaptDown();
+ } else if (IsUnderusing(*encode_usage_percent_, now_ms)) {
+ last_rampup_time_ms_ = now_ms;
+ in_quick_rampup_ = true;
+
+ observer->AdaptUp();
+ }
+
+ int rampup_delay =
+ in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;
+
+ RTC_LOG(LS_VERBOSE) << " Frame stats: "
+ " encode usage "
+ << *encode_usage_percent_ << " overuse detections "
+ << num_overuse_detections_ << " rampup delay "
+ << rampup_delay;
+}
+
+void OveruseFrameDetector::SetOptions(const CpuOveruseOptions& options) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ options_ = options;
+
+ // Time constant config overridable by field trial.
+ if (filter_time_constant_) {
+ options_.filter_time_ms = filter_time_constant_->ms();
+ }
+ // Force reset with next frame.
+ num_pixels_ = 0;
+ usage_ = CreateProcessingUsage(options);
+}
+
+bool OveruseFrameDetector::IsOverusing(int usage_percent) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+
+ if (usage_percent >= options_.high_encode_usage_threshold_percent) {
+ ++checks_above_threshold_;
+ } else {
+ checks_above_threshold_ = 0;
+ }
+ return checks_above_threshold_ >= options_.high_threshold_consecutive_count;
+}
+
+bool OveruseFrameDetector::IsUnderusing(int usage_percent, int64_t time_now) {
+ RTC_DCHECK_RUN_ON(&task_checker_);
+ int delay = in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;
+ if (time_now < last_rampup_time_ms_ + delay)
+ return false;
+
+ return usage_percent < options_.low_encode_usage_threshold_percent;
+}
+} // namespace webrtc
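
The filter update in SendProcessingUsage2 above rewards a second look. The sketch below reproduces the same first-order low-pass update outside the class and runs it to steady state; the inputs (40 ms encode time per frame, 30 fps, a 5 s time constant) are illustrative assumptions, not values taken from this file.

    #include <cmath>
    #include <cstdio>

    // Same update rule as SendProcessingUsage2::AddSample(), factored out:
    // load moves toward encode_time/diff_time with weight 1 - exp(-diff_time/tau).
    double UpdateLoad(double load, double encode_time_s, double diff_time_s,
                      double tau_s) {
      const double e = diff_time_s / tau_s;
      // For very small intervals, use the series limit of
      // (1 - exp(-e)) / diff_time to avoid dividing by a tiny diff_time.
      const double c =
          e < 0.0001 ? (1 - e / 2) / tau_s : -std::expm1(-e) / diff_time_s;
      return c * encode_time_s + std::exp(-e) * load;
    }

    int main() {
      double load = 0.5;  // start between the underuse and overuse thresholds
      // 30 fps input where every frame takes 40 ms to encode; the estimate
      // converges toward 0.040 / (1/30) = 1.2, i.e. 120% usage.
      for (int i = 0; i < 900; ++i) {
        load = UpdateLoad(load, /*encode_time_s=*/0.040,
                          /*diff_time_s=*/1.0 / 30, /*tau_s=*/5.0);
      }
      std::printf("estimated load: %.0f%%\n", 100 * load);  // ~120%
    }

The steady state is encode_time/diff_time, so a stream whose frames cost more to encode than the inter-frame interval settles above 100%, which is exactly the condition the overuse thresholds react to.
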
diff --git a/third_party/libwebrtc/video/adaptation/overuse_frame_detector.h b/third_party/libwebrtc/video/adaptation/overuse_frame_detector.h
new file mode 100644
index 0000000000..4e1f6a83a4
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/overuse_frame_detector.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
+#define VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
+
+#include <list>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+class VideoFrame;
+
+struct CpuOveruseOptions {
+ explicit CpuOveruseOptions(const FieldTrialsView& field_trials);
+
+ int low_encode_usage_threshold_percent; // Threshold for triggering underuse.
+ int high_encode_usage_threshold_percent; // Threshold for triggering overuse.
+ // General settings.
+ int frame_timeout_interval_ms; // The maximum allowed interval between two
+ // frames before resetting estimations.
+ int min_frame_samples; // The minimum number of frames required.
+ int min_process_count; // The number of initial process times required before
+ // triggering an overuse/underuse.
+ int high_threshold_consecutive_count; // The number of consecutive checks
+ // above the high threshold before
+ // triggering an overuse.
+ // New estimator enabled if this is set non-zero.
+  int filter_time_ms;  // Time constant for averaging.
+};
+
+class OveruseFrameDetectorObserverInterface {
+ public:
+ // Called to signal that we can handle larger or more frequent frames.
+ virtual void AdaptUp() = 0;
+ // Called to signal that the source should reduce the resolution or framerate.
+ virtual void AdaptDown() = 0;
+
+ protected:
+ virtual ~OveruseFrameDetectorObserverInterface() {}
+};
+
+// Use to detect system overuse based on the send-side processing time of
+// incoming frames. All methods must be called on a single task queue but it can
+// be created and destroyed on an arbitrary thread.
+// OveruseFrameDetector::StartCheckForOveruse must be called to periodically
+// check for overuse.
+class OveruseFrameDetector {
+ public:
+ explicit OveruseFrameDetector(CpuOveruseMetricsObserver* metrics_observer,
+ const FieldTrialsView& field_trials);
+ virtual ~OveruseFrameDetector();
+
+ OveruseFrameDetector(const OveruseFrameDetector&) = delete;
+ OveruseFrameDetector& operator=(const OveruseFrameDetector&) = delete;
+
+ // Start to periodically check for overuse.
+ void StartCheckForOveruse(
+ TaskQueueBase* task_queue_base,
+ const CpuOveruseOptions& options,
+ OveruseFrameDetectorObserverInterface* overuse_observer);
+
+ // StopCheckForOveruse must be called before destruction if
+ // StartCheckForOveruse has been called.
+ void StopCheckForOveruse();
+
+ // Defines the current maximum framerate targeted by the capturer. This is
+ // used to make sure the encode usage percent doesn't drop unduly if the
+ // capturer has quiet periods (for instance caused by screen capturers with
+ // variable capture rate depending on content updates), otherwise we might
+ // experience adaptation toggling.
+ virtual void OnTargetFramerateUpdated(int framerate_fps);
+
+ // Called for each captured frame.
+ void FrameCaptured(const VideoFrame& frame, int64_t time_when_first_seen_us);
+
+ // Called for each sent frame.
+ void FrameSent(uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us);
+
+ // Interface for cpu load estimation. Intended for internal use only.
+ class ProcessingUsage {
+ public:
+ virtual void Reset() = 0;
+ virtual void SetMaxSampleDiffMs(float diff_ms) = 0;
+ virtual void FrameCaptured(const VideoFrame& frame,
+ int64_t time_when_first_seen_us,
+ int64_t last_capture_time_us) = 0;
+ // Returns encode_time in us, if there's a new measurement.
+ virtual absl::optional<int> FrameSent(
+        // These two arguments are used by the old estimator.
+ uint32_t timestamp,
+ int64_t time_sent_in_us,
+ // And these two by the new estimator.
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us) = 0;
+
+ virtual int Value() = 0;
+ virtual ~ProcessingUsage() = default;
+ };
+
+ protected:
+ // Protected for test purposes.
+ void CheckForOveruse(OveruseFrameDetectorObserverInterface* overuse_observer);
+ void SetOptions(const CpuOveruseOptions& options);
+
+ CpuOveruseOptions options_;
+
+ private:
+ void EncodedFrameTimeMeasured(int encode_duration_ms);
+ bool IsOverusing(int encode_usage_percent);
+ bool IsUnderusing(int encode_usage_percent, int64_t time_now);
+
+ bool FrameTimeoutDetected(int64_t now) const;
+ bool FrameSizeChanged(int num_pixels) const;
+
+ void ResetAll(int num_pixels);
+
+ static std::unique_ptr<ProcessingUsage> CreateProcessingUsage(
+ const CpuOveruseOptions& options);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_;
+ // Owned by the task queue from where StartCheckForOveruse is called.
+ RepeatingTaskHandle check_overuse_task_ RTC_GUARDED_BY(task_checker_);
+
+ // Stats metrics.
+ CpuOveruseMetricsObserver* const metrics_observer_;
+ absl::optional<int> encode_usage_percent_ RTC_GUARDED_BY(task_checker_);
+
+ int64_t num_process_times_ RTC_GUARDED_BY(task_checker_);
+
+ int64_t last_capture_time_us_ RTC_GUARDED_BY(task_checker_);
+
+ // Number of pixels of last captured frame.
+ int num_pixels_ RTC_GUARDED_BY(task_checker_);
+ int max_framerate_ RTC_GUARDED_BY(task_checker_);
+ int64_t last_overuse_time_ms_ RTC_GUARDED_BY(task_checker_);
+ int checks_above_threshold_ RTC_GUARDED_BY(task_checker_);
+ int num_overuse_detections_ RTC_GUARDED_BY(task_checker_);
+ int64_t last_rampup_time_ms_ RTC_GUARDED_BY(task_checker_);
+ bool in_quick_rampup_ RTC_GUARDED_BY(task_checker_);
+ int current_rampup_delay_ms_ RTC_GUARDED_BY(task_checker_);
+
+ std::unique_ptr<ProcessingUsage> usage_ RTC_PT_GUARDED_BY(task_checker_);
+
+ // If set by field trial, overrides CpuOveruseOptions::filter_time_ms.
+ FieldTrialOptional<TimeDelta> filter_time_constant_{"tau"};
+};
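+
+// A typical lifecycle, sketched under the constraints documented above (all
+// calls on one task queue; StopCheckForOveruse() before destruction). Names
+// other than the detector's own methods are illustrative.
+//
+//   OveruseFrameDetector detector(&metrics_observer, field_trials);
+//   detector.StartCheckForOveruse(task_queue, options, &adapt_observer);
+//   // For every frame:
+//   detector.FrameCaptured(frame, rtc::TimeMicros());
+//   detector.FrameSent(rtp_timestamp, rtc::TimeMicros(), capture_time_us,
+//                      encode_duration_us);
+//   // Before destruction:
+//   detector.StopCheckForOveruse();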
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
diff --git a/third_party/libwebrtc/video/adaptation/overuse_frame_detector_unittest.cc b/third_party/libwebrtc/video/adaptation/overuse_frame_detector_unittest.cc
new file mode 100644
index 0000000000..5098c9c2ec
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/overuse_frame_detector_unittest.cc
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/overuse_frame_detector.h"
+
+#include <memory>
+
+#include "api/field_trials_view.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_adaptation_reason.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "rtc_base/event.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/random.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::InvokeWithoutArgs;
+
+namespace {
+const int kWidth = 640;
+const int kHeight = 480;
+// 5 ms of processing per 33 ms frame interval corresponds to a load of ~15%.
+const int kFrameIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+const int kProcessTimeUs = 5 * rtc::kNumMicrosecsPerMillisec;
+const test::ScopedKeyValueConfig kFieldTrials;
+} // namespace
+
+class MockCpuOveruseObserver : public OveruseFrameDetectorObserverInterface {
+ public:
+ MockCpuOveruseObserver() {}
+ virtual ~MockCpuOveruseObserver() {}
+
+ MOCK_METHOD(void, AdaptUp, (), (override));
+ MOCK_METHOD(void, AdaptDown, (), (override));
+};
+
+class CpuOveruseObserverImpl : public OveruseFrameDetectorObserverInterface {
+ public:
+ CpuOveruseObserverImpl() : overuse_(0), normaluse_(0) {}
+ virtual ~CpuOveruseObserverImpl() {}
+
+ void AdaptDown() override { ++overuse_; }
+ void AdaptUp() override { ++normaluse_; }
+
+ int overuse_;
+ int normaluse_;
+};
+
+class OveruseFrameDetectorUnderTest : public OveruseFrameDetector {
+ public:
+ explicit OveruseFrameDetectorUnderTest(
+ CpuOveruseMetricsObserver* metrics_observer)
+ : OveruseFrameDetector(metrics_observer, kFieldTrials) {}
+ ~OveruseFrameDetectorUnderTest() {}
+
+ using OveruseFrameDetector::CheckForOveruse;
+ using OveruseFrameDetector::SetOptions;
+};
+
+class OveruseFrameDetectorTest : public ::testing::Test,
+ public CpuOveruseMetricsObserver {
+ protected:
+ OveruseFrameDetectorTest() : options_(kFieldTrials) {}
+
+ void SetUp() override {
+ observer_ = &mock_observer_;
+ options_.min_process_count = 0;
+ overuse_detector_ = std::make_unique<OveruseFrameDetectorUnderTest>(this);
+ // Unfortunately, we can't call SetOptions here, since that would break
+ // single-threading requirements in the RunOnTqNormalUsage test.
+ }
+
+ void OnEncodedFrameTimeMeasured(int encode_time_ms,
+ int encode_usage_percent) override {
+ encode_usage_percent_ = encode_usage_percent;
+ }
+
+ int InitialUsage() {
+ return ((options_.low_encode_usage_threshold_percent +
+ options_.high_encode_usage_threshold_percent) /
+ 2.0f) +
+ 0.5;
+ }
+
+ virtual void InsertAndSendFramesWithInterval(int num_frames,
+ int interval_us,
+ int width,
+ int height,
+ int delay_us) {
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ uint32_t timestamp = 0;
+ while (num_frames-- > 0) {
+ frame.set_timestamp(timestamp);
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ clock_.AdvanceTime(TimeDelta::Micros(delay_us));
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(),
+ capture_time_us, delay_us);
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us - delay_us));
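+      // Advance the RTP timestamp: the 90 kHz clock gives 90 ticks per ms.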
+ timestamp += interval_us * 90 / 1000;
+ }
+ }
+
+ virtual void InsertAndSendSimulcastFramesWithInterval(
+ int num_frames,
+ int interval_us,
+ int width,
+ int height,
+ // One element per layer
+ rtc::ArrayView<const int> delays_us) {
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ uint32_t timestamp = 0;
+ while (num_frames-- > 0) {
+ frame.set_timestamp(timestamp);
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ int max_delay_us = 0;
+ for (int delay_us : delays_us) {
+ if (delay_us > max_delay_us) {
+ clock_.AdvanceTime(TimeDelta::Micros(delay_us - max_delay_us));
+ max_delay_us = delay_us;
+ }
+
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(),
+ capture_time_us, delay_us);
+ }
+ overuse_detector_->CheckForOveruse(observer_);
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us - max_delay_us));
+ timestamp += interval_us * 90 / 1000;
+ }
+ }
+
+ virtual void InsertAndSendFramesWithRandomInterval(int num_frames,
+ int min_interval_us,
+ int max_interval_us,
+ int width,
+ int height,
+ int delay_us) {
+ webrtc::Random random(17);
+
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ uint32_t timestamp = 0;
+ while (num_frames-- > 0) {
+ frame.set_timestamp(timestamp);
+ int interval_us = random.Rand(min_interval_us, max_interval_us);
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ clock_.AdvanceTime(TimeDelta::Micros(delay_us));
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(),
+ capture_time_us,
+ absl::optional<int>(delay_us));
+
+ overuse_detector_->CheckForOveruse(observer_);
+ // Avoid turning clock backwards.
+ if (interval_us > delay_us)
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us - delay_us));
+
+ timestamp += interval_us * 90 / 1000;
+ }
+ }
+
+ virtual void ForceUpdate(int width, int height) {
+    // Insert one frame, wait a second and then put in another to force an
+    // update of the usage. In the tests where this is used, adding another
+    // sample doesn't affect the expected outcome (this is mainly to check
+    // initial values and whether the overuse detector has been reset or not).
+ InsertAndSendFramesWithInterval(2, rtc::kNumMicrosecsPerSec, width, height,
+ kFrameIntervalUs);
+ }
+ void TriggerOveruse(int num_times) {
+ const int kDelayUs = 32 * rtc::kNumMicrosecsPerMillisec;
+ for (int i = 0; i < num_times; ++i) {
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+ }
+
+ void TriggerUnderuse() {
+ const int kDelayUs1 = 5000;
+ const int kDelayUs2 = 6000;
+ InsertAndSendFramesWithInterval(1300, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs1);
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs2);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ int UsagePercent() { return encode_usage_percent_; }
+
+ int64_t OveruseProcessingTimeLimitForFramerate(int fps) const {
+ int64_t frame_interval = rtc::kNumMicrosecsPerSec / fps;
+ int64_t max_processing_time_us =
+ (frame_interval * options_.high_encode_usage_threshold_percent) / 100;
+ return max_processing_time_us;
+ }
+
+ int64_t UnderuseProcessingTimeLimitForFramerate(int fps) const {
+ int64_t frame_interval = rtc::kNumMicrosecsPerSec / fps;
+ int64_t max_processing_time_us =
+ (frame_interval * options_.low_encode_usage_threshold_percent) / 100;
+ return max_processing_time_us;
+ }
+
+ CpuOveruseOptions options_;
+ rtc::ScopedFakeClock clock_;
+ MockCpuOveruseObserver mock_observer_;
+ OveruseFrameDetectorObserverInterface* observer_;
+ std::unique_ptr<OveruseFrameDetectorUnderTest> overuse_detector_;
+ int encode_usage_percent_ = -1;
+};
+
+// UsagePercent() > high_encode_usage_threshold_percent => overuse.
+// UsagePercent() < low_encode_usage_threshold_percent => underuse.
+TEST_F(OveruseFrameDetectorTest, TriggerOveruse) {
+ // usage > high => overuse
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+}
+
+TEST_F(OveruseFrameDetectorTest, OveruseAndRecover) {
+ // usage > high => overuse
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ // usage < low => underuse
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(::testing::AtLeast(1));
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest, DoubleOveruseAndRecover) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(2);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(::testing::AtLeast(1));
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest, TriggerUnderuseWithMinProcessCount) {
+ const int kProcessIntervalUs = 5 * rtc::kNumMicrosecsPerSec;
+ options_.min_process_count = 1;
+ CpuOveruseObserverImpl overuse_observer;
+ observer_ = nullptr;
+ overuse_detector_->SetOptions(options_);
+ InsertAndSendFramesWithInterval(1200, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ overuse_detector_->CheckForOveruse(&overuse_observer);
+ EXPECT_EQ(0, overuse_observer.normaluse_);
+ clock_.AdvanceTime(TimeDelta::Micros(kProcessIntervalUs));
+ overuse_detector_->CheckForOveruse(&overuse_observer);
+ EXPECT_EQ(1, overuse_observer.normaluse_);
+}
+
+TEST_F(OveruseFrameDetectorTest, ConstantOveruseGivesNoNormalUsage) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(0);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(64);
+ for (size_t i = 0; i < 64; ++i) {
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, ConsecutiveCountTriggersOveruse) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ options_.high_threshold_consecutive_count = 2;
+ overuse_detector_->SetOptions(options_);
+ TriggerOveruse(2);
+}
+
+TEST_F(OveruseFrameDetectorTest, IncorrectConsecutiveCountTriggersNoOveruse) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ options_.high_threshold_consecutive_count = 2;
+ overuse_detector_->SetOptions(options_);
+ TriggerOveruse(1);
+}
+
+TEST_F(OveruseFrameDetectorTest, ProcessingUsage) {
+ overuse_detector_->SetOptions(options_);
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_EQ(kProcessTimeUs * 100 / kFrameIntervalUs, UsagePercent());
+}
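+
+// Sanity check of the expectation above: kProcessTimeUs is 5000 us and
+// kFrameIntervalUs is 33000 us, so 5000 * 100 / 33000 = 15 (integer
+// division), matching the ~15% load noted at the top of this file.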
+
+TEST_F(OveruseFrameDetectorTest, ResetAfterResolutionChange) {
+ overuse_detector_->SetOptions(options_);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ // Verify reset (with new width/height).
+ ForceUpdate(kWidth, kHeight + 1);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, ResetAfterFrameTimeout) {
+ overuse_detector_->SetOptions(options_);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(
+ 2, options_.frame_timeout_interval_ms * rtc::kNumMicrosecsPerMillisec,
+ kWidth, kHeight, kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ // Verify reset.
+ InsertAndSendFramesWithInterval(
+ 2,
+ (options_.frame_timeout_interval_ms + 1) * rtc::kNumMicrosecsPerMillisec,
+ kWidth, kHeight, kProcessTimeUs);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, MinFrameSamplesBeforeUpdating) {
+ options_.min_frame_samples = 40;
+ overuse_detector_->SetOptions(options_);
+ InsertAndSendFramesWithInterval(40, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ // Pass time far enough to digest all previous samples.
+ clock_.AdvanceTime(TimeDelta::Seconds(1));
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ // The last sample has not been processed here.
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+
+ // Pass time far enough to digest all previous samples, 41 in total.
+ clock_.AdvanceTime(TimeDelta::Seconds(1));
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, InitialProcessingUsage) {
+ overuse_detector_->SetOptions(options_);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, MeasuresMultipleConcurrentSamples) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(::testing::AtLeast(1));
+ static const int kIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ static const size_t kNumFramesEncodingDelay = 3;
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(kWidth, kHeight))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ for (size_t i = 0; i < 1000; ++i) {
+ // Unique timestamps.
+ frame.set_timestamp(static_cast<uint32_t>(i));
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs));
+ if (i > kNumFramesEncodingDelay) {
+ overuse_detector_->FrameSent(
+ static_cast<uint32_t>(i - kNumFramesEncodingDelay), rtc::TimeMicros(),
+ capture_time_us, kIntervalUs);
+ }
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, UpdatesExistingSamples) {
+ // >85% encoding time should trigger overuse.
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(::testing::AtLeast(1));
+ static const int kIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ static const int kDelayUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(kWidth, kHeight))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ uint32_t timestamp = 0;
+ for (size_t i = 0; i < 1000; ++i) {
+ frame.set_timestamp(timestamp);
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ // Encode and send first parts almost instantly.
+ clock_.AdvanceTime(TimeDelta::Millis(1));
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
+ rtc::kNumMicrosecsPerMillisec);
+ // Encode heavier part, resulting in >85% usage total.
+ clock_.AdvanceTime(TimeDelta::Micros(kDelayUs) - TimeDelta::Millis(1));
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
+ kDelayUs);
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs - kDelayUs));
+ timestamp += kIntervalUs * 90 / 1000;
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, RunOnTqNormalUsage) {
+ TaskQueueForTest queue("OveruseFrameDetectorTestQueue");
+
+ queue.SendTask([&] {
+ overuse_detector_->StartCheckForOveruse(queue.Get(), options_, observer_);
+ });
+
+ rtc::Event event;
+  // Expect AdaptUp(). When called, stop the `overuse_detector_` and then
+  // set `event` to end the test.
+ EXPECT_CALL(mock_observer_, AdaptUp())
+ .WillOnce(InvokeWithoutArgs([this, &event] {
+ overuse_detector_->StopCheckForOveruse();
+ event.Set();
+ }));
+
+ queue.PostTask([this] {
+ const int kDelayUs1 = 5 * rtc::kNumMicrosecsPerMillisec;
+ const int kDelayUs2 = 6 * rtc::kNumMicrosecsPerMillisec;
+ InsertAndSendFramesWithInterval(1300, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs1);
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs2);
+ });
+
+ EXPECT_TRUE(event.Wait(TimeDelta::Seconds(10)));
+}
+
+// TODO(crbug.com/webrtc/12846): investigate why the test fails on MAC bots.
+#if !defined(WEBRTC_MAC)
+TEST_F(OveruseFrameDetectorTest, MaxIntervalScalesWithFramerate) {
+ const int kCapturerMaxFrameRate = 30;
+ const int kEncodeMaxFrameRate = 20; // Maximum fps the encoder can sustain.
+
+ overuse_detector_->SetOptions(options_);
+ // Trigger overuse.
+ int64_t frame_interval_us = rtc::kNumMicrosecsPerSec / kCapturerMaxFrameRate;
+  // Processing time just below the overuse limit given kEncodeMaxFrameRate.
+ int64_t processing_time_us =
+ (98 * OveruseProcessingTimeLimitForFramerate(kEncodeMaxFrameRate)) / 100;
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ // Simulate frame rate reduction and normal usage.
+ frame_interval_us = rtc::kNumMicrosecsPerSec / kEncodeMaxFrameRate;
+ overuse_detector_->OnTargetFramerateUpdated(kEncodeMaxFrameRate);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ // Reduce processing time to trigger underuse.
+ processing_time_us =
+ (98 * UnderuseProcessingTimeLimitForFramerate(kEncodeMaxFrameRate)) / 100;
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(1);
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+}
+#endif
+
+TEST_F(OveruseFrameDetectorTest, RespectsMinFramerate) {
+ const int kMinFrameRate = 7; // Minimum fps allowed by current detector impl.
+ overuse_detector_->SetOptions(options_);
+ overuse_detector_->OnTargetFramerateUpdated(kMinFrameRate);
+
+ // Normal usage just at the limit.
+ int64_t frame_interval_us = rtc::kNumMicrosecsPerSec / kMinFrameRate;
+  // Processing time just below the overuse limit given kMinFrameRate.
+ int64_t processing_time_us =
+ (98 * OveruseProcessingTimeLimitForFramerate(kMinFrameRate)) / 100;
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ // Over the limit to overuse.
+ processing_time_us =
+ (102 * OveruseProcessingTimeLimitForFramerate(kMinFrameRate)) / 100;
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ // Reduce input frame rate. Should still trigger overuse.
+ overuse_detector_->OnTargetFramerateUpdated(kMinFrameRate - 1);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, LimitsMaxFrameInterval) {
+ const int kMaxFrameRate = 20;
+ overuse_detector_->SetOptions(options_);
+ overuse_detector_->OnTargetFramerateUpdated(kMaxFrameRate);
+ int64_t frame_interval_us = rtc::kNumMicrosecsPerSec / kMaxFrameRate;
+ // Maximum frame interval allowed is 35% above ideal.
+ int64_t max_frame_interval_us = (135 * frame_interval_us) / 100;
+  // Maximum processing time allowed with the above frame interval without
+  // triggering overuse.
+ int64_t max_processing_time_us =
+ (max_frame_interval_us * options_.high_encode_usage_threshold_percent) /
+ 100;
+
+ // Processing time just below overuse limit given kMaxFrameRate.
+ int64_t processing_time_us = (98 * max_processing_time_us) / 100;
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, max_frame_interval_us, kWidth,
+ kHeight, processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ // Go above limit, trigger overuse.
+ processing_time_us = (102 * max_processing_time_us) / 100;
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, max_frame_interval_us, kWidth,
+ kHeight, processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+
+ // Increase frame interval, should still trigger overuse.
+ max_frame_interval_us *= 2;
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, max_frame_interval_us, kWidth,
+ kHeight, processing_time_us);
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+}
+
+// Models screencast, with irregular arrival of frames which are heavy
+// to encode.
+TEST_F(OveruseFrameDetectorTest, NoOveruseForLargeRandomFrameInterval) {
+ // TODO(bugs.webrtc.org/8504): When new estimator is relanded,
+ // behavior is improved in this scenario, with only AdaptUp events,
+ // and estimated load closer to the true average.
+
+ // EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ // EXPECT_CALL(mock_observer_, AdaptUp())
+ // .Times(::testing::AtLeast(1));
+ overuse_detector_->SetOptions(options_);
+
+ const int kNumFrames = 500;
+ const int kEncodeTimeUs = 100 * rtc::kNumMicrosecsPerMillisec;
+
+ const int kMinIntervalUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ const int kMaxIntervalUs = 1000 * rtc::kNumMicrosecsPerMillisec;
+
+ const int kTargetFramerate = 5;
+
+ overuse_detector_->OnTargetFramerateUpdated(kTargetFramerate);
+
+ InsertAndSendFramesWithRandomInterval(kNumFrames, kMinIntervalUs,
+ kMaxIntervalUs, kWidth, kHeight,
+ kEncodeTimeUs);
+ // Average usage 19%. Check that estimate is in the right ball park.
+ // EXPECT_NEAR(UsagePercent(), 20, 10);
+ EXPECT_NEAR(UsagePercent(), 20, 35);
+}
+
+// Models screencast, with irregular arrival of frames, often
+// exceeding the timeout interval.
+TEST_F(OveruseFrameDetectorTest, NoOveruseForRandomFrameIntervalWithReset) {
+ // TODO(bugs.webrtc.org/8504): When new estimator is relanded,
+ // behavior is improved in this scenario, and we get AdaptUp events.
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ // EXPECT_CALL(mock_observer_, AdaptUp())
+ // .Times(::testing::AtLeast(1));
+
+ const int kNumFrames = 500;
+ const int kEncodeTimeUs = 100 * rtc::kNumMicrosecsPerMillisec;
+
+ const int kMinIntervalUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ const int kMaxIntervalUs = 3000 * rtc::kNumMicrosecsPerMillisec;
+
+ const int kTargetFramerate = 5;
+
+ overuse_detector_->OnTargetFramerateUpdated(kTargetFramerate);
+
+ InsertAndSendFramesWithRandomInterval(kNumFrames, kMinIntervalUs,
+ kMaxIntervalUs, kWidth, kHeight,
+ kEncodeTimeUs);
+
+ // Average usage 6.6%, but since the frame_timeout_interval_ms is
+ // only 1500 ms, we often reset the estimate to the initial value.
+ // Check that estimate is in the right ball park.
+ EXPECT_GE(UsagePercent(), 1);
+ EXPECT_LE(UsagePercent(), InitialUsage() + 5);
+}
+
+// Models simulcast, with multiple encoded frames for each input frame.
+// Load estimate should be based on the maximum encode time per input frame.
+TEST_F(OveruseFrameDetectorTest, NoOveruseForSimulcast) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+
+ constexpr int kNumFrames = 500;
+ constexpr int kEncodeTimesUs[] = {
+ 10 * rtc::kNumMicrosecsPerMillisec,
+ 8 * rtc::kNumMicrosecsPerMillisec,
+ 12 * rtc::kNumMicrosecsPerMillisec,
+ };
+ constexpr int kIntervalUs = 30 * rtc::kNumMicrosecsPerMillisec;
+
+ InsertAndSendSimulcastFramesWithInterval(kNumFrames, kIntervalUs, kWidth,
+ kHeight, kEncodeTimesUs);
+
+ // Average usage 40%. 12 ms / 30 ms.
+ EXPECT_GE(UsagePercent(), 35);
+ EXPECT_LE(UsagePercent(), 45);
+}
+
+// Tests using the new CPU load estimator.
+class OveruseFrameDetectorTest2 : public OveruseFrameDetectorTest {
+ protected:
+ void SetUp() override {
+ options_.filter_time_ms = 5 * rtc::kNumMillisecsPerSec;
+ OveruseFrameDetectorTest::SetUp();
+ }
+
+ void InsertAndSendFramesWithInterval(int num_frames,
+ int interval_us,
+ int width,
+ int height,
+ int delay_us) override {
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ while (num_frames-- > 0) {
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us /* ignored */);
+ overuse_detector_->FrameSent(0 /* ignored timestamp */,
+ 0 /* ignored send_time_us */,
+ capture_time_us, delay_us);
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us));
+ }
+ }
+
+ void InsertAndSendFramesWithRandomInterval(int num_frames,
+ int min_interval_us,
+ int max_interval_us,
+ int width,
+ int height,
+ int delay_us) override {
+ webrtc::Random random(17);
+
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ for (int i = 0; i < num_frames; i++) {
+ int interval_us = random.Rand(min_interval_us, max_interval_us);
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ overuse_detector_->FrameSent(0 /* ignored timestamp */,
+ 0 /* ignored send_time_us */,
+ capture_time_us, delay_us);
+
+ overuse_detector_->CheckForOveruse(observer_);
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us));
+ }
+ }
+
+ void ForceUpdate(int width, int height) override {
+ // This is mainly to check initial values and whether the overuse
+ // detector has been reset or not.
+ InsertAndSendFramesWithInterval(1, rtc::kNumMicrosecsPerSec, width, height,
+ kFrameIntervalUs);
+ }
+};
+
+// UsagePercent() > high_encode_usage_threshold_percent => overuse.
+// UsagePercent() < low_encode_usage_threshold_percent => underuse.
+TEST_F(OveruseFrameDetectorTest2, TriggerOveruse) {
+ // usage > high => overuse
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+}
+
+TEST_F(OveruseFrameDetectorTest2, OveruseAndRecover) {
+ // usage > high => overuse
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ // usage < low => underuse
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(::testing::AtLeast(1));
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest2, DoubleOveruseAndRecover) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(2);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(::testing::AtLeast(1));
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest2, TriggerUnderuseWithMinProcessCount) {
+ const int kProcessIntervalUs = 5 * rtc::kNumMicrosecsPerSec;
+ options_.min_process_count = 1;
+ CpuOveruseObserverImpl overuse_observer;
+ observer_ = nullptr;
+ overuse_detector_->SetOptions(options_);
+ InsertAndSendFramesWithInterval(1200, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ overuse_detector_->CheckForOveruse(&overuse_observer);
+ EXPECT_EQ(0, overuse_observer.normaluse_);
+ clock_.AdvanceTime(TimeDelta::Micros(kProcessIntervalUs));
+ overuse_detector_->CheckForOveruse(&overuse_observer);
+ EXPECT_EQ(1, overuse_observer.normaluse_);
+}
+
+TEST_F(OveruseFrameDetectorTest2, ConstantOveruseGivesNoNormalUsage) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(0);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(64);
+ for (size_t i = 0; i < 64; ++i) {
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest2, ConsecutiveCountTriggersOveruse) {
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(1);
+ options_.high_threshold_consecutive_count = 2;
+ overuse_detector_->SetOptions(options_);
+ TriggerOveruse(2);
+}
+
+TEST_F(OveruseFrameDetectorTest2, IncorrectConsecutiveCountTriggersNoOveruse) {
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ options_.high_threshold_consecutive_count = 2;
+ overuse_detector_->SetOptions(options_);
+ TriggerOveruse(1);
+}
+
+TEST_F(OveruseFrameDetectorTest2, ProcessingUsage) {
+ overuse_detector_->SetOptions(options_);
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_EQ(kProcessTimeUs * 100 / kFrameIntervalUs, UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest2, ResetAfterResolutionChange) {
+ overuse_detector_->SetOptions(options_);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ // Verify reset (with new width/height).
+ ForceUpdate(kWidth, kHeight + 1);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest2, ResetAfterFrameTimeout) {
+ overuse_detector_->SetOptions(options_);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(1000, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(
+ 2, options_.frame_timeout_interval_ms * rtc::kNumMicrosecsPerMillisec,
+ kWidth, kHeight, kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ // Verify reset.
+ InsertAndSendFramesWithInterval(
+ 2,
+ (options_.frame_timeout_interval_ms + 1) * rtc::kNumMicrosecsPerMillisec,
+ kWidth, kHeight, kProcessTimeUs);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest2, ConvergesSlowly) {
+ overuse_detector_->SetOptions(options_);
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ // No update for the first sample.
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+
+ // Total time approximately 40 * 33ms = 1.3s, significantly less
+ // than the 5s time constant.
+ InsertAndSendFramesWithInterval(40, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+
+ // Should have started to approach correct load of 15%, but not very far.
+ EXPECT_LT(UsagePercent(), InitialUsage());
+ EXPECT_GT(UsagePercent(), (InitialUsage() * 3 + 8) / 4);
+
+ // Run for roughly 10s more, should now be closer.
+ InsertAndSendFramesWithInterval(300, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+ EXPECT_NEAR(UsagePercent(), 20, 5);
+}
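+
+// A rough sanity check of the bounds above, assuming the default
+// software-encoder thresholds (low ~42%, high ~85%, so InitialUsage() is
+// ~64%): with a 5 s filter time constant, ~1.3 s of samples move the estimate
+// only 1 - e^(-1.3/5), roughly a quarter of the way from ~64% toward the true
+// ~15% load, which is why it must still exceed (InitialUsage() * 3 + 8) / 4.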
+
+TEST_F(OveruseFrameDetectorTest2, InitialProcessingUsage) {
+ overuse_detector_->SetOptions(options_);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest2, MeasuresMultipleConcurrentSamples) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(::testing::AtLeast(1));
+ static const int kIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ static const size_t kNumFramesEncodingDelay = 3;
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(kWidth, kHeight))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ for (size_t i = 0; i < 1000; ++i) {
+ // Unique timestamps.
+ frame.set_timestamp(static_cast<uint32_t>(i));
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs));
+ if (i > kNumFramesEncodingDelay) {
+ overuse_detector_->FrameSent(
+ static_cast<uint32_t>(i - kNumFramesEncodingDelay), rtc::TimeMicros(),
+ capture_time_us, kIntervalUs);
+ }
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest2, UpdatesExistingSamples) {
+ // >85% encoding time should trigger overuse.
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(::testing::AtLeast(1));
+ static const int kIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ static const int kDelayUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(kWidth, kHeight))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_us(0)
+ .build();
+ uint32_t timestamp = 0;
+ for (size_t i = 0; i < 1000; ++i) {
+ frame.set_timestamp(timestamp);
+ int64_t capture_time_us = rtc::TimeMicros();
+ overuse_detector_->FrameCaptured(frame, capture_time_us);
+ // Encode and send first parts almost instantly.
+ clock_.AdvanceTime(TimeDelta::Millis(1));
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
+ rtc::kNumMicrosecsPerMillisec);
+ // Encode heavier part, resulting in >85% usage total.
+ clock_.AdvanceTime(TimeDelta::Micros(kDelayUs) - TimeDelta::Millis(1));
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
+ kDelayUs);
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs - kDelayUs));
+ timestamp += kIntervalUs * 90 / 1000;
+ overuse_detector_->CheckForOveruse(observer_);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest2, RunOnTqNormalUsage) {
+ TaskQueueForTest queue("OveruseFrameDetectorTestQueue");
+
+ queue.SendTask([&] {
+ overuse_detector_->StartCheckForOveruse(queue.Get(), options_, observer_);
+ });
+
+ rtc::Event event;
+  // Expect AdaptUp(). When called, stop the `overuse_detector_` and then
+  // set `event` to end the test.
+ EXPECT_CALL(mock_observer_, AdaptUp())
+ .WillOnce(InvokeWithoutArgs([this, &event] {
+ overuse_detector_->StopCheckForOveruse();
+ event.Set();
+ }));
+
+ queue.PostTask([this] {
+ const int kDelayUs1 = 5 * rtc::kNumMicrosecsPerMillisec;
+ const int kDelayUs2 = 6 * rtc::kNumMicrosecsPerMillisec;
+ InsertAndSendFramesWithInterval(1300, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs1);
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs2);
+ });
+
+ EXPECT_TRUE(event.Wait(TimeDelta::Seconds(10)));
+}
+
+// Models screencast, with irregular arrival of frames which are heavy
+// to encode.
+TEST_F(OveruseFrameDetectorTest2, NoOveruseForLargeRandomFrameInterval) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(::testing::AtLeast(1));
+
+ const int kNumFrames = 500;
+ const int kEncodeTimeUs = 100 * rtc::kNumMicrosecsPerMillisec;
+
+ const int kMinIntervalUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ const int kMaxIntervalUs = 1000 * rtc::kNumMicrosecsPerMillisec;
+
+ InsertAndSendFramesWithRandomInterval(kNumFrames, kMinIntervalUs,
+ kMaxIntervalUs, kWidth, kHeight,
+ kEncodeTimeUs);
+ // Average usage 19%. Check that estimate is in the right ball park.
+ EXPECT_NEAR(UsagePercent(), 20, 10);
+}
+
+// Models screencast, with irregular arrival of frames, often
+// exceeding the timeout interval.
+TEST_F(OveruseFrameDetectorTest2, NoOveruseForRandomFrameIntervalWithReset) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+ EXPECT_CALL(mock_observer_, AdaptUp()).Times(::testing::AtLeast(1));
+
+ const int kNumFrames = 500;
+ const int kEncodeTimeUs = 100 * rtc::kNumMicrosecsPerMillisec;
+
+ const int kMinIntervalUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ const int kMaxIntervalUs = 3000 * rtc::kNumMicrosecsPerMillisec;
+
+ InsertAndSendFramesWithRandomInterval(kNumFrames, kMinIntervalUs,
+ kMaxIntervalUs, kWidth, kHeight,
+ kEncodeTimeUs);
+
+ // Average usage 6.6%, but since the frame_timeout_interval_ms is
+ // only 1500 ms, we often reset the estimate to the initial value.
+ // Check that estimate is in the right ball park.
+ EXPECT_GE(UsagePercent(), 1);
+ EXPECT_LE(UsagePercent(), InitialUsage() + 5);
+}
+
+TEST_F(OveruseFrameDetectorTest2, ToleratesOutOfOrderFrames) {
+ overuse_detector_->SetOptions(options_);
+  // Represents a CPU utilization close to 100%. The first input frame results
+  // in three encoded frames, and the last of those isn't finished until after
+  // the first encoded frame corresponding to the next input frame has been
+  // sent.
+ const int kEncodeTimeUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ const int kCaptureTimesMs[] = {33, 33, 66, 33};
+
+ for (int capture_time_ms : kCaptureTimesMs) {
+ overuse_detector_->FrameSent(
+ 0, 0, capture_time_ms * rtc::kNumMicrosecsPerMillisec, kEncodeTimeUs);
+ }
+ EXPECT_GE(UsagePercent(), InitialUsage());
+}
+
+// Models simulcast, with multiple encoded frames for each input frame.
+// Load estimate should be based on the maximum encode time per input frame.
+TEST_F(OveruseFrameDetectorTest2, NoOveruseForSimulcast) {
+ overuse_detector_->SetOptions(options_);
+ EXPECT_CALL(mock_observer_, AdaptDown()).Times(0);
+
+ constexpr int kNumFrames = 500;
+ constexpr int kEncodeTimesUs[] = {
+ 10 * rtc::kNumMicrosecsPerMillisec,
+ 8 * rtc::kNumMicrosecsPerMillisec,
+ 12 * rtc::kNumMicrosecsPerMillisec,
+ };
+ constexpr int kIntervalUs = 30 * rtc::kNumMicrosecsPerMillisec;
+
+ InsertAndSendSimulcastFramesWithInterval(kNumFrames, kIntervalUs, kWidth,
+ kHeight, kEncodeTimesUs);
+
+ // Average usage 40%. 12 ms / 30 ms.
+ EXPECT_GE(UsagePercent(), 35);
+ EXPECT_LE(UsagePercent(), 45);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/pixel_limit_resource.cc b/third_party/libwebrtc/video/adaptation/pixel_limit_resource.cc
new file mode 100644
index 0000000000..872e169879
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/pixel_limit_resource.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/pixel_limit_resource.h"
+
+#include "api/sequence_checker.h"
+#include "api/units/time_delta.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr TimeDelta kResourceUsageCheckIntervalMs = TimeDelta::Seconds(5);
+
+} // namespace
+
+// static
+rtc::scoped_refptr<PixelLimitResource> PixelLimitResource::Create(
+ TaskQueueBase* task_queue,
+ VideoStreamInputStateProvider* input_state_provider) {
+ return rtc::make_ref_counted<PixelLimitResource>(task_queue,
+ input_state_provider);
+}
+
+PixelLimitResource::PixelLimitResource(
+ TaskQueueBase* task_queue,
+ VideoStreamInputStateProvider* input_state_provider)
+ : task_queue_(task_queue),
+ input_state_provider_(input_state_provider),
+ max_pixels_(absl::nullopt) {
+ RTC_DCHECK(task_queue_);
+ RTC_DCHECK(input_state_provider_);
+}
+
+PixelLimitResource::~PixelLimitResource() {
+ RTC_DCHECK(!listener_);
+ RTC_DCHECK(!repeating_task_.Running());
+}
+
+void PixelLimitResource::SetMaxPixels(int max_pixels) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ max_pixels_ = max_pixels;
+}
+
+void PixelLimitResource::SetResourceListener(ResourceListener* listener) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ listener_ = listener;
+ if (listener_) {
+ repeating_task_.Stop();
+ repeating_task_ = RepeatingTaskHandle::Start(task_queue_, [&] {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ if (!listener_) {
+        // We don't have a listener, so resource adaptation must not be
+        // running; try again later.
+ return kResourceUsageCheckIntervalMs;
+ }
+ if (!max_pixels_.has_value()) {
+ // No pixel limit configured yet, try again later.
+ return kResourceUsageCheckIntervalMs;
+ }
+ absl::optional<int> frame_size_pixels =
+ input_state_provider_->InputState().frame_size_pixels();
+ if (!frame_size_pixels.has_value()) {
+ // We haven't observed a frame yet so we don't know if it's going to be
+ // too big or too small, try again later.
+ return kResourceUsageCheckIntervalMs;
+ }
+ int current_pixels = frame_size_pixels.value();
+ int target_pixel_upper_bounds = max_pixels_.value();
+      // To avoid toggling, we allow any resolution between
+      // `target_pixel_upper_bounds` and video_stream_adapter.h's
+      // GetLowerResolutionThan(), i.e. the pixel count we would end up with if
+      // we adapted down from `target_pixel_upper_bounds`.
+ int target_pixels_lower_bounds =
+ GetLowerResolutionThan(target_pixel_upper_bounds);
+ if (current_pixels > target_pixel_upper_bounds) {
+ listener_->OnResourceUsageStateMeasured(
+ rtc::scoped_refptr<Resource>(this), ResourceUsageState::kOveruse);
+ } else if (current_pixels < target_pixels_lower_bounds) {
+ listener_->OnResourceUsageStateMeasured(
+ rtc::scoped_refptr<Resource>(this), ResourceUsageState::kUnderuse);
+ }
+ return kResourceUsageCheckIntervalMs;
+ });
+ } else {
+ repeating_task_.Stop();
+ }
+ // The task must be running if we have a listener.
+ RTC_DCHECK(repeating_task_.Running() || !listener_);
+}
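+
+// Worked example (illustrative; the exact step size is whatever
+// video_stream_adapter.h's GetLowerResolutionThan() returns, roughly 3/5 of
+// the input): with SetMaxPixels(1280 * 720) = 921600, overuse is reported
+// while the input is above 921600 pixels and underuse while it is below
+// GetLowerResolutionThan(921600); anything in between is left alone, which is
+// what prevents the resolution from toggling.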
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/pixel_limit_resource.h b/third_party/libwebrtc/video/adaptation/pixel_limit_resource.h
new file mode 100644
index 0000000000..b42f92434f
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/pixel_limit_resource.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_
+#define VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_
+
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/scoped_refptr.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// An adaptation resource designed to be used in the TestBed. Used to simulate
+// being CPU limited.
+//
+// Periodically reports "overuse" or "underuse" (every 5 seconds) until the
+// stream is within the bounds specified in terms of a maximum resolution and
+// one resolution step lower than that (this avoids toggling when this is the
+// only resource in play). When multiple resources come into play, some amount
+// of toggling is still possible in edge cases, but that is OK for testing
+// purposes.
+class PixelLimitResource : public Resource {
+ public:
+ static rtc::scoped_refptr<PixelLimitResource> Create(
+ TaskQueueBase* task_queue,
+ VideoStreamInputStateProvider* input_state_provider);
+
+ PixelLimitResource(TaskQueueBase* task_queue,
+ VideoStreamInputStateProvider* input_state_provider);
+ ~PixelLimitResource() override;
+
+ void SetMaxPixels(int max_pixels);
+
+ // Resource implementation.
+ std::string Name() const override { return "PixelLimitResource"; }
+ void SetResourceListener(ResourceListener* listener) override;
+
+ private:
+ TaskQueueBase* const task_queue_;
+ VideoStreamInputStateProvider* const input_state_provider_;
+ absl::optional<int> max_pixels_ RTC_GUARDED_BY(task_queue_);
+ webrtc::ResourceListener* listener_ RTC_GUARDED_BY(task_queue_);
+ RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(task_queue_);
+};
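+
+// A minimal wiring sketch (illustrative; `task_queue`, `input_state_provider`
+// and `listener` are assumed to exist and to outlive the resource, and all
+// calls below run on `task_queue`):
+//
+//   auto resource =
+//       PixelLimitResource::Create(task_queue, input_state_provider);
+//   resource->SetResourceListener(&listener);  // Starts the repeating task.
+//   resource->SetMaxPixels(1280 * 720);
+//   ...
+//   resource->SetResourceListener(nullptr);  // Required before destruction.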
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_
diff --git a/third_party/libwebrtc/video/adaptation/pixel_limit_resource_unittest.cc b/third_party/libwebrtc/video/adaptation/pixel_limit_resource_unittest.cc
new file mode 100644
index 0000000000..28eb19b1aa
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/pixel_limit_resource_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/pixel_limit_resource.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/functional/any_invocable.h"
+#include "api/units/timestamp.h"
+#include "call/adaptation/test/fake_video_stream_input_state_provider.h"
+#include "call/adaptation/test/mock_resource_listener.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using testing::_;
+
+namespace webrtc {
+
+namespace {
+
+constexpr TimeDelta kResourceUsageCheckIntervalMs = TimeDelta::Seconds(5);
+
+} // namespace
+
+class PixelLimitResourceTest : public ::testing::Test {
+ public:
+ PixelLimitResourceTest()
+ : time_controller_(Timestamp::Micros(1234)),
+ task_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "TestQueue",
+ TaskQueueFactory::Priority::NORMAL)),
+ input_state_provider_() {}
+
+ void SetCurrentPixels(int current_pixels) {
+ input_state_provider_.SetInputState(current_pixels, 30, current_pixels);
+ }
+
+ void RunTaskOnTaskQueue(absl::AnyInvocable<void() &&> task) {
+ task_queue_->PostTask(std::move(task));
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ protected:
+ // Posted tasks, including repeated tasks, are executed when simulated time is
+ // advanced by time_controller_.AdvanceTime().
+ GlobalSimulatedTimeController time_controller_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_;
+ FakeVideoStreamInputStateProvider input_state_provider_;
+};
+
+TEST_F(PixelLimitResourceTest, ResourceIsSilentByDefault) {
+  // Because our mock is strict, the test would fail if
+  // OnResourceUsageStateMeasured() were invoked.
+ testing::StrictMock<MockResourceListener> resource_listener;
+ RunTaskOnTaskQueue([&]() {
+ rtc::scoped_refptr<PixelLimitResource> pixel_limit_resource =
+ PixelLimitResource::Create(task_queue_.get(), &input_state_provider_);
+ pixel_limit_resource->SetResourceListener(&resource_listener);
+ // Set a current pixel count.
+ SetCurrentPixels(1280 * 720);
+ // Advance a significant amount of time.
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 10);
+ pixel_limit_resource->SetResourceListener(nullptr);
+ });
+}
+
+TEST_F(PixelLimitResourceTest,
+ OveruseIsReportedWhileCurrentPixelsIsGreaterThanMaxPixels) {
+ constexpr int kMaxPixels = 640 * 480;
+ testing::StrictMock<MockResourceListener> resource_listener;
+ RunTaskOnTaskQueue([&]() {
+ rtc::scoped_refptr<PixelLimitResource> pixel_limit_resource =
+ PixelLimitResource::Create(task_queue_.get(), &input_state_provider_);
+ pixel_limit_resource->SetResourceListener(&resource_listener);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ pixel_limit_resource->SetMaxPixels(kMaxPixels);
+ SetCurrentPixels(kMaxPixels + 1);
+ EXPECT_CALL(resource_listener,
+ OnResourceUsageStateMeasured(_, ResourceUsageState::kOveruse))
+ .Times(1);
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs);
+
+    // As long as the current pixel count has not been updated, the overuse
+    // signal is repeated at a fixed interval.
+ EXPECT_CALL(resource_listener,
+ OnResourceUsageStateMeasured(_, ResourceUsageState::kOveruse))
+ .Times(3);
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3);
+
+    // When the overuse signal has resulted in a lower resolution, the overuse
+    // signal stops.
+ SetCurrentPixels(kMaxPixels);
+ EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)).Times(0);
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3);
+
+ pixel_limit_resource->SetResourceListener(nullptr);
+ });
+}
+
+TEST_F(PixelLimitResourceTest,
+ UnderuseIsReportedWhileCurrentPixelsIsLessThanMinPixels) {
+ constexpr int kMaxPixels = 640 * 480;
+ const int kMinPixels = GetLowerResolutionThan(kMaxPixels);
+ testing::StrictMock<MockResourceListener> resource_listener;
+ RunTaskOnTaskQueue([&]() {
+ rtc::scoped_refptr<PixelLimitResource> pixel_limit_resource =
+ PixelLimitResource::Create(task_queue_.get(), &input_state_provider_);
+ pixel_limit_resource->SetResourceListener(&resource_listener);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ pixel_limit_resource->SetMaxPixels(kMaxPixels);
+ SetCurrentPixels(kMinPixels - 1);
+ EXPECT_CALL(resource_listener,
+ OnResourceUsageStateMeasured(_, ResourceUsageState::kUnderuse))
+ .Times(1);
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs);
+
+    // As long as the current pixel count has not been updated, the underuse
+    // signal is repeated at a fixed interval.
+ EXPECT_CALL(resource_listener,
+ OnResourceUsageStateMeasured(_, ResourceUsageState::kUnderuse))
+ .Times(3);
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3);
+
+    // When the underuse signal has resulted in a higher resolution, the
+    // underuse signal stops.
+ SetCurrentPixels(kMinPixels);
+ EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)).Times(0);
+ time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3);
+
+ pixel_limit_resource->SetResourceListener(nullptr);
+ });
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.cc b/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.cc
new file mode 100644
index 0000000000..adcad40c03
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/quality_rampup_experiment_helper.h"
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+QualityRampUpExperimentHelper::QualityRampUpExperimentHelper(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock,
+ QualityRampupExperiment experiment)
+ : experiment_listener_(experiment_listener),
+ clock_(clock),
+ quality_rampup_experiment_(std::move(experiment)),
+ cpu_adapted_(false),
+ qp_resolution_adaptations_(0) {
+ RTC_DCHECK(experiment_listener_);
+ RTC_DCHECK(clock_);
+}
+
+std::unique_ptr<QualityRampUpExperimentHelper>
+QualityRampUpExperimentHelper::CreateIfEnabled(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock) {
+ QualityRampupExperiment experiment = QualityRampupExperiment::ParseSettings();
+ if (experiment.Enabled()) {
+ return std::unique_ptr<QualityRampUpExperimentHelper>(
+ new QualityRampUpExperimentHelper(experiment_listener, clock,
+ experiment));
+ }
+ return nullptr;
+}
+
+void QualityRampUpExperimentHelper::ConfigureQualityRampupExperiment(
+ bool reset,
+ absl::optional<uint32_t> pixels,
+ absl::optional<DataRate> max_bitrate) {
+ if (reset)
+ quality_rampup_experiment_.Reset();
+ if (pixels && max_bitrate)
+ quality_rampup_experiment_.SetMaxBitrate(*pixels, max_bitrate->kbps());
+}
+
+void QualityRampUpExperimentHelper::PerformQualityRampupExperiment(
+ rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
+ DataRate bandwidth,
+ DataRate encoder_target_bitrate,
+ absl::optional<DataRate> max_bitrate) {
+ if (!quality_scaler_resource->is_started() || !max_bitrate)
+ return;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+
+ bool try_quality_rampup = false;
+ if (quality_rampup_experiment_.BwHigh(now_ms, bandwidth.kbps())) {
+ // Verify that encoder is at max bitrate and the QP is low.
+ if (encoder_target_bitrate == *max_bitrate &&
+ quality_scaler_resource->QpFastFilterLow()) {
+ try_quality_rampup = true;
+ }
+ }
+ if (try_quality_rampup && qp_resolution_adaptations_ > 0 && !cpu_adapted_) {
+ experiment_listener_->OnQualityRampUp();
+ }
+}
+
+void QualityRampUpExperimentHelper::cpu_adapted(bool cpu_adapted) {
+ cpu_adapted_ = cpu_adapted;
+}
+
+void QualityRampUpExperimentHelper::qp_resolution_adaptations(
+ int qp_resolution_adaptations) {
+ qp_resolution_adaptations_ = qp_resolution_adaptations;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.h b/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.h
new file mode 100644
index 0000000000..4fe1f24876
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
+#define VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/units/data_rate.h"
+#include "rtc_base/experiments/quality_rampup_experiment.h"
+#include "system_wrappers/include/clock.h"
+#include "video/adaptation/quality_scaler_resource.h"
+
+namespace webrtc {
+
+class QualityRampUpExperimentListener {
+ public:
+ virtual ~QualityRampUpExperimentListener() = default;
+ virtual void OnQualityRampUp() = 0;
+};
+
+// Helper class for orchestrating the WebRTC-Video-QualityRampupSettings
+// experiment.
+class QualityRampUpExperimentHelper {
+ public:
+ // Returns a QualityRampUpExperimentHelper if the experiment is enabled,
+ // a nullptr otherwise.
+ static std::unique_ptr<QualityRampUpExperimentHelper> CreateIfEnabled(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock);
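+
+ // A minimal usage sketch (illustrative only; the caller is assumed to
+ // implement QualityRampUpExperimentListener):
+ //
+ //   auto helper = QualityRampUpExperimentHelper::CreateIfEnabled(this,
+ //                                                                clock);
+ //   if (helper) { /* enabled: forward encoder signals to the helper */ }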
+
+ QualityRampUpExperimentHelper(const QualityRampUpExperimentHelper&) = delete;
+ QualityRampUpExperimentHelper& operator=(
+ const QualityRampUpExperimentHelper&) = delete;
+
+ void cpu_adapted(bool cpu_adapted);
+ void qp_resolution_adaptations(int qp_resolution_adaptations);
+
+ void ConfigureQualityRampupExperiment(bool reset,
+ absl::optional<uint32_t> pixels,
+ absl::optional<DataRate> max_bitrate);
+
+ void PerformQualityRampupExperiment(
+ rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
+ DataRate bandwidth,
+ DataRate encoder_target_bitrate,
+ absl::optional<DataRate> max_bitrate);
+
+ private:
+ QualityRampUpExperimentHelper(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock,
+ QualityRampupExperiment experiment);
+ QualityRampUpExperimentListener* const experiment_listener_;
+ Clock* clock_;
+ QualityRampupExperiment quality_rampup_experiment_;
+ bool cpu_adapted_;
+ int qp_resolution_adaptations_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
diff --git a/third_party/libwebrtc/video/adaptation/quality_scaler_resource.cc b/third_party/libwebrtc/video/adaptation/quality_scaler_resource.cc
new file mode 100644
index 0000000000..68d56fe29e
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/quality_scaler_resource.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/quality_scaler_resource.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+// static
+rtc::scoped_refptr<QualityScalerResource> QualityScalerResource::Create() {
+ return rtc::make_ref_counted<QualityScalerResource>();
+}
+
+QualityScalerResource::QualityScalerResource()
+ : VideoStreamEncoderResource("QualityScalerResource"),
+ quality_scaler_(nullptr) {}
+
+QualityScalerResource::~QualityScalerResource() {
+ RTC_DCHECK(!quality_scaler_);
+}
+
+bool QualityScalerResource::is_started() const {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ return quality_scaler_.get();
+}
+
+void QualityScalerResource::StartCheckForOveruse(
+ VideoEncoder::QpThresholds qp_thresholds) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(!is_started());
+ quality_scaler_ =
+ std::make_unique<QualityScaler>(this, std::move(qp_thresholds));
+}
+
+void QualityScalerResource::StopCheckForOveruse() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(is_started());
+ // Ensure we have no pending callbacks. This makes it safe to destroy the
+ // QualityScaler and even task queues with tasks in-flight.
+ quality_scaler_.reset();
+}
+
+void QualityScalerResource::SetQpThresholds(
+ VideoEncoder::QpThresholds qp_thresholds) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(is_started());
+ quality_scaler_->SetQpThresholds(std::move(qp_thresholds));
+}
+
+bool QualityScalerResource::QpFastFilterLow() {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ RTC_DCHECK(is_started());
+ return quality_scaler_->QpFastFilterLow();
+}
+
+void QualityScalerResource::OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ if (quality_scaler_ && encoded_image.qp_ >= 0) {
+ quality_scaler_->ReportQp(encoded_image.qp_, time_sent_in_us);
+ }
+}
+
+void QualityScalerResource::OnFrameDropped(
+ EncodedImageCallback::DropReason reason) {
+ RTC_DCHECK_RUN_ON(encoder_queue());
+ if (!quality_scaler_)
+ return;
+ switch (reason) {
+ case EncodedImageCallback::DropReason::kDroppedByMediaOptimizations:
+ quality_scaler_->ReportDroppedFrameByMediaOpt();
+ break;
+ case EncodedImageCallback::DropReason::kDroppedByEncoder:
+ quality_scaler_->ReportDroppedFrameByEncoder();
+ break;
+ }
+}
+
+void QualityScalerResource::OnReportQpUsageHigh() {
+ OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
+}
+
+void QualityScalerResource::OnReportQpUsageLow() {
+ OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/quality_scaler_resource.h b/third_party/libwebrtc/video/adaptation/quality_scaler_resource.h
new file mode 100644
index 0000000000..cbb6d3d06f
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/quality_scaler_resource.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
+#define VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
+
+#include <memory>
+#include <queue>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/degradation_preference_provider.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
+
+namespace webrtc {
+
+// Handles interaction with the QualityScaler.
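+//
+// A minimal usage sketch, mirroring the unit test setup (illustrative only;
+// `encoder_queue`, `listener` and `thresholds` are assumed to exist on the
+// caller's side):
+//
+//   auto resource = QualityScalerResource::Create();
+//   resource->RegisterEncoderTaskQueue(encoder_queue);
+//   resource->SetResourceListener(&listener);
+//   resource->StartCheckForOveruse(thresholds);
+//   ...
+//   resource->StopCheckForOveruse();
+//   resource->SetResourceListener(nullptr);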
+class QualityScalerResource : public VideoStreamEncoderResource,
+ public QualityScalerQpUsageHandlerInterface {
+ public:
+ static rtc::scoped_refptr<QualityScalerResource> Create();
+
+ QualityScalerResource();
+ ~QualityScalerResource() override;
+
+ bool is_started() const;
+
+ void StartCheckForOveruse(VideoEncoder::QpThresholds qp_thresholds);
+ void StopCheckForOveruse();
+ void SetQpThresholds(VideoEncoder::QpThresholds qp_thresholds);
+ bool QpFastFilterLow();
+ void OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us);
+ void OnFrameDropped(EncodedImageCallback::DropReason reason);
+
+ // QualityScalerQpUsageHandlerInterface implementation.
+ void OnReportQpUsageHigh() override;
+ void OnReportQpUsageLow() override;
+
+ private:
+ std::unique_ptr<QualityScaler> quality_scaler_
+ RTC_GUARDED_BY(encoder_queue());
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
diff --git a/third_party/libwebrtc/video/adaptation/quality_scaler_resource_unittest.cc b/third_party/libwebrtc/video/adaptation/quality_scaler_resource_unittest.cc
new file mode 100644
index 0000000000..70d297588f
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/quality_scaler_resource_unittest.cc
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/quality_scaler_resource.h"
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/test/mock_resource_listener.h"
+#include "rtc_base/thread.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using testing::_;
+using testing::Eq;
+using testing::StrictMock;
+
+namespace {
+
+class FakeDegradationPreferenceProvider : public DegradationPreferenceProvider {
+ public:
+ ~FakeDegradationPreferenceProvider() override = default;
+
+ DegradationPreference degradation_preference() const override {
+ return DegradationPreference::MAINTAIN_FRAMERATE;
+ }
+};
+
+} // namespace
+
+class QualityScalerResourceTest : public ::testing::Test {
+ public:
+ QualityScalerResourceTest()
+ : quality_scaler_resource_(QualityScalerResource::Create()) {
+ quality_scaler_resource_->RegisterEncoderTaskQueue(
+ TaskQueueBase::Current());
+ quality_scaler_resource_->SetResourceListener(&fake_resource_listener_);
+ }
+
+ ~QualityScalerResourceTest() override {
+ quality_scaler_resource_->SetResourceListener(nullptr);
+ }
+
+ protected:
+ rtc::AutoThread main_thread_;
+ StrictMock<MockResourceListener> fake_resource_listener_;
+ FakeDegradationPreferenceProvider degradation_preference_provider_;
+ rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource_;
+};
+
+TEST_F(QualityScalerResourceTest, ReportQpHigh) {
+ EXPECT_CALL(fake_resource_listener_,
+ OnResourceUsageStateMeasured(Eq(quality_scaler_resource_),
+ Eq(ResourceUsageState::kOveruse)));
+ quality_scaler_resource_->OnReportQpUsageHigh();
+}
+
+TEST_F(QualityScalerResourceTest, ReportQpLow) {
+ EXPECT_CALL(fake_resource_listener_,
+ OnResourceUsageStateMeasured(Eq(quality_scaler_resource_),
+ Eq(ResourceUsageState::kUnderuse)));
+ quality_scaler_resource_->OnReportQpUsageLow();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/video_adaptation_gn/moz.build b/third_party/libwebrtc/video/adaptation/video_adaptation_gn/moz.build
new file mode 100644
index 0000000000..e0f103cc6c
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/video_adaptation_gn/moz.build
@@ -0,0 +1,241 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/adaptation/balanced_constraint.cc",
+ "/third_party/libwebrtc/video/adaptation/bandwidth_quality_scaler_resource.cc",
+ "/third_party/libwebrtc/video/adaptation/bitrate_constraint.cc",
+ "/third_party/libwebrtc/video/adaptation/encode_usage_resource.cc",
+ "/third_party/libwebrtc/video/adaptation/overuse_frame_detector.cc",
+ "/third_party/libwebrtc/video/adaptation/pixel_limit_resource.cc",
+ "/third_party/libwebrtc/video/adaptation/quality_rampup_experiment_helper.cc",
+ "/third_party/libwebrtc/video/adaptation/quality_scaler_resource.cc",
+ "/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.cc",
+ "/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_adaptation_gn")
diff --git a/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.cc b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.cc
new file mode 100644
index 0000000000..ad89aef52a
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/video_stream_encoder_resource.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace webrtc {
+
+VideoStreamEncoderResource::VideoStreamEncoderResource(std::string name)
+ : lock_(),
+ name_(std::move(name)),
+ encoder_queue_(nullptr),
+ listener_(nullptr) {}
+
+VideoStreamEncoderResource::~VideoStreamEncoderResource() {
+ RTC_DCHECK(!listener_)
+ << "There is a listener depending on a VideoStreamEncoderResource being "
+ << "destroyed.";
+}
+
+void VideoStreamEncoderResource::RegisterEncoderTaskQueue(
+ TaskQueueBase* encoder_queue) {
+ RTC_DCHECK(!encoder_queue_);
+ RTC_DCHECK(encoder_queue);
+ encoder_queue_ = encoder_queue;
+}
+
+void VideoStreamEncoderResource::SetResourceListener(
+ ResourceListener* listener) {
+ // To change the listener, first unregister the old one by setting the
+ // listener to null.
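+ // Illustrative swap (hypothetical caller-side names):
+ //   resource->SetResourceListener(nullptr);        // unregister the old one
+ //   resource->SetResourceListener(&new_listener);  // then register the new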
+ MutexLock crit(&lock_);
+ RTC_DCHECK(!listener_ || !listener) << "A listener is already set";
+ listener_ = listener;
+}
+
+std::string VideoStreamEncoderResource::Name() const {
+ return name_;
+}
+
+void VideoStreamEncoderResource::OnResourceUsageStateMeasured(
+ ResourceUsageState usage_state) {
+ MutexLock crit(&lock_);
+ if (listener_) {
+ listener_->OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource>(this),
+ usage_state);
+ }
+}
+
+TaskQueueBase* VideoStreamEncoderResource::encoder_queue() const {
+ return encoder_queue_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.h b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.h
new file mode 100644
index 0000000000..e10f595757
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
+#define VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class VideoStreamEncoderResource : public Resource {
+ public:
+ ~VideoStreamEncoderResource() override;
+
+ // Registering task queues must be performed as part of initialization.
+ void RegisterEncoderTaskQueue(TaskQueueBase* encoder_queue);
+
+ // Resource implementation.
+ std::string Name() const override;
+ void SetResourceListener(ResourceListener* listener) override;
+
+ protected:
+ explicit VideoStreamEncoderResource(std::string name);
+
+ void OnResourceUsageStateMeasured(ResourceUsageState usage_state);
+
+ // The caller is responsible for ensuring the task queue is still valid.
+ TaskQueueBase* encoder_queue() const;
+
+ private:
+ mutable Mutex lock_;
+ const std::string name_;
+ // Treated as const after initialization.
+ TaskQueueBase* encoder_queue_;
+ ResourceListener* listener_ RTC_GUARDED_BY(lock_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
diff --git a/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.cc b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.cc
new file mode 100644
index 0000000000..2470bc8893
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.cc
@@ -0,0 +1,851 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/video_stream_encoder_resource_manager.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "api/adaptation/resource.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video/video_source_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "video/adaptation/quality_scaler_resource.h"
+
+namespace webrtc {
+
+const int kDefaultInputPixelsWidth = 176;
+const int kDefaultInputPixelsHeight = 144;
+
+namespace {
+
+constexpr const char* kPixelLimitResourceFieldTrialName =
+ "WebRTC-PixelLimitResource";
+
+bool IsResolutionScalingEnabled(DegradationPreference degradation_preference) {
+ return degradation_preference == DegradationPreference::MAINTAIN_FRAMERATE ||
+ degradation_preference == DegradationPreference::BALANCED;
+}
+
+bool IsFramerateScalingEnabled(DegradationPreference degradation_preference) {
+ return degradation_preference == DegradationPreference::MAINTAIN_RESOLUTION ||
+ degradation_preference == DegradationPreference::BALANCED;
+}
+
+std::string ToString(VideoAdaptationReason reason) {
+ switch (reason) {
+ case VideoAdaptationReason::kQuality:
+ return "quality";
+ case VideoAdaptationReason::kCpu:
+ return "cpu";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+std::vector<bool> GetActiveLayersFlags(const VideoCodec& codec) {
+ std::vector<bool> flags;
+ if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
+ flags.resize(codec.VP9().numberOfSpatialLayers);
+ for (size_t i = 0; i < flags.size(); ++i) {
+ flags[i] = codec.spatialLayers[i].active;
+ }
+ } else {
+ flags.resize(codec.numberOfSimulcastStreams);
+ for (size_t i = 0; i < flags.size(); ++i) {
+ flags[i] = codec.simulcastStream[i].active;
+ }
+ }
+ return flags;
+}
+
+bool EqualFlags(const std::vector<bool>& a, const std::vector<bool>& b) {
+ if (a.size() != b.size())
+ return false;
+ return std::equal(a.begin(), a.end(), b.begin());
+}
+
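+// Returns the configured max bitrate of the single active layer, or
+// absl::nullopt when zero or more than one layer is active.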
+absl::optional<DataRate> GetSingleActiveLayerMaxBitrate(
+ const VideoCodec& codec) {
+ int num_active = 0;
+ absl::optional<DataRate> max_bitrate;
+ if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
+ for (int i = 0; i < codec.VP9().numberOfSpatialLayers; ++i) {
+ if (codec.spatialLayers[i].active) {
+ ++num_active;
+ max_bitrate =
+ DataRate::KilobitsPerSec(codec.spatialLayers[i].maxBitrate);
+ }
+ }
+ } else {
+ for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ if (codec.simulcastStream[i].active) {
+ ++num_active;
+ max_bitrate =
+ DataRate::KilobitsPerSec(codec.simulcastStream[i].maxBitrate);
+ }
+ }
+ }
+ return (num_active > 1) ? absl::nullopt : max_bitrate;
+}
+
+} // namespace
+
+class VideoStreamEncoderResourceManager::InitialFrameDropper {
+ public:
+ explicit InitialFrameDropper(
+ rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource)
+ : quality_scaler_resource_(quality_scaler_resource),
+ quality_scaler_settings_(QualityScalerSettings::ParseFromFieldTrials()),
+ has_seen_first_bwe_drop_(false),
+ set_start_bitrate_(DataRate::Zero()),
+ set_start_bitrate_time_ms_(0),
+ initial_framedrop_(0),
+ use_bandwidth_allocation_(false),
+ bandwidth_allocation_(DataRate::Zero()),
+ last_input_width_(0),
+ last_input_height_(0),
+ last_stream_configuration_changed_(false) {
+ RTC_DCHECK(quality_scaler_resource_);
+ }
+
+ // Output signal.
+ bool DropInitialFrames() const {
+ return initial_framedrop_ < kMaxInitialFramedrop;
+ }
+
+ absl::optional<uint32_t> single_active_stream_pixels() const {
+ return single_active_stream_pixels_;
+ }
+
+ absl::optional<uint32_t> UseBandwidthAllocationBps() const {
+ return (use_bandwidth_allocation_ &&
+ bandwidth_allocation_ > DataRate::Zero())
+ ? absl::optional<uint32_t>(bandwidth_allocation_.bps())
+ : absl::nullopt;
+ }
+
+ bool last_stream_configuration_changed() const {
+ return last_stream_configuration_changed_;
+ }
+
+ // Input signals.
+ void SetStartBitrate(DataRate start_bitrate, int64_t now_ms) {
+ set_start_bitrate_ = start_bitrate;
+ set_start_bitrate_time_ms_ = now_ms;
+ }
+
+ void SetBandwidthAllocation(DataRate bandwidth_allocation) {
+ bandwidth_allocation_ = bandwidth_allocation;
+ }
+
+ void SetTargetBitrate(DataRate target_bitrate, int64_t now_ms) {
+ if (set_start_bitrate_ > DataRate::Zero() && !has_seen_first_bwe_drop_ &&
+ quality_scaler_resource_->is_started() &&
+ quality_scaler_settings_.InitialBitrateIntervalMs() &&
+ quality_scaler_settings_.InitialBitrateFactor()) {
+ int64_t diff_ms = now_ms - set_start_bitrate_time_ms_;
+ if (diff_ms <
+ quality_scaler_settings_.InitialBitrateIntervalMs().value() &&
+ (target_bitrate <
+ (set_start_bitrate_ *
+ quality_scaler_settings_.InitialBitrateFactor().value()))) {
+ RTC_LOG(LS_INFO) << "Reset initial_framedrop_. Start bitrate: "
+ << set_start_bitrate_.bps()
+ << ", target bitrate: " << target_bitrate.bps();
+ initial_framedrop_ = 0;
+ has_seen_first_bwe_drop_ = true;
+ }
+ }
+ }
+
+ void OnEncoderSettingsUpdated(
+ const VideoCodec& codec,
+ const VideoAdaptationCounters& adaptation_counters) {
+ last_stream_configuration_changed_ = false;
+ std::vector<bool> active_flags = GetActiveLayersFlags(codec);
+ // Check if the source resolution has changed for external reasons, i.e.
+ // without any adaptation from WebRTC.
+ const bool source_resolution_changed =
+ (last_input_width_ != codec.width ||
+ last_input_height_ != codec.height) &&
+ adaptation_counters.resolution_adaptations ==
+ last_adaptation_counters_.resolution_adaptations;
+ if (!EqualFlags(active_flags, last_active_flags_) ||
+ source_resolution_changed) {
+ // Streams configuration has changed.
+ last_stream_configuration_changed_ = true;
+ // Initial frame drop must be enabled because BWE might be way too low
+ // for the selected resolution.
+ if (quality_scaler_resource_->is_started()) {
+ RTC_LOG(LS_INFO) << "Resetting initial_framedrop_ due to changed "
+ "stream parameters";
+ initial_framedrop_ = 0;
+ if (single_active_stream_pixels_ &&
+ VideoStreamAdapter::GetSingleActiveLayerPixels(codec) >
+ *single_active_stream_pixels_) {
+ // Resolution increased.
+ use_bandwidth_allocation_ = true;
+ }
+ }
+ }
+ last_adaptation_counters_ = adaptation_counters;
+ last_active_flags_ = active_flags;
+ last_input_width_ = codec.width;
+ last_input_height_ = codec.height;
+ single_active_stream_pixels_ =
+ VideoStreamAdapter::GetSingleActiveLayerPixels(codec);
+ }
+
+ void OnFrameDroppedDueToSize() { ++initial_framedrop_; }
+
+ void Disable() {
+ initial_framedrop_ = kMaxInitialFramedrop;
+ use_bandwidth_allocation_ = false;
+ }
+
+ void OnQualityScalerSettingsUpdated() {
+ if (quality_scaler_resource_->is_started()) {
+ // Restart frame drops due to size.
+ initial_framedrop_ = 0;
+ } else {
+ // Quality scaling is disabled, so we shouldn't drop initial frames.
+ Disable();
+ }
+ }
+
+ private:
+ // The maximum number of frames to drop at the beginning of the stream to
+ // try to achieve the desired bitrate.
+ static const int kMaxInitialFramedrop = 4;
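+ // E.g. with the value 4, DropInitialFrames() returns true for the first
+ // four frames after a (re)start and false afterwards, unless the counter
+ // is reset by a BWE drop or a stream reconfiguration.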
+
+ const rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource_;
+ const QualityScalerSettings quality_scaler_settings_;
+ bool has_seen_first_bwe_drop_;
+ DataRate set_start_bitrate_;
+ int64_t set_start_bitrate_time_ms_;
+ // Counts how many frames we've dropped in the initial framedrop phase.
+ int initial_framedrop_;
+ absl::optional<uint32_t> single_active_stream_pixels_;
+ bool use_bandwidth_allocation_;
+ DataRate bandwidth_allocation_;
+
+ std::vector<bool> last_active_flags_;
+ VideoAdaptationCounters last_adaptation_counters_;
+ int last_input_width_;
+ int last_input_height_;
+ bool last_stream_configuration_changed_;
+};
+
+VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager(
+ VideoStreamInputStateProvider* input_state_provider,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ Clock* clock,
+ bool experiment_cpu_load_estimator,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector,
+ DegradationPreferenceProvider* degradation_preference_provider,
+ const FieldTrialsView& field_trials)
+ : field_trials_(field_trials),
+ degradation_preference_provider_(degradation_preference_provider),
+ bitrate_constraint_(std::make_unique<BitrateConstraint>()),
+ balanced_constraint_(
+ std::make_unique<BalancedConstraint>(degradation_preference_provider_,
+ field_trials)),
+ encode_usage_resource_(
+ EncodeUsageResource::Create(std::move(overuse_detector))),
+ quality_scaler_resource_(QualityScalerResource::Create()),
+ pixel_limit_resource_(nullptr),
+ bandwidth_quality_scaler_resource_(
+ BandwidthQualityScalerResource::Create()),
+ encoder_queue_(nullptr),
+ input_state_provider_(input_state_provider),
+ adaptation_processor_(nullptr),
+ encoder_stats_observer_(encoder_stats_observer),
+ degradation_preference_(DegradationPreference::DISABLED),
+ video_source_restrictions_(),
+ balanced_settings_(field_trials),
+ clock_(clock),
+ experiment_cpu_load_estimator_(experiment_cpu_load_estimator),
+ initial_frame_dropper_(
+ std::make_unique<InitialFrameDropper>(quality_scaler_resource_)),
+ quality_scaling_experiment_enabled_(QualityScalingExperiment::Enabled()),
+ pixel_limit_resource_experiment_enabled_(
+ field_trials.IsEnabled(kPixelLimitResourceFieldTrialName)),
+ encoder_target_bitrate_bps_(absl::nullopt),
+ quality_rampup_experiment_(
+ QualityRampUpExperimentHelper::CreateIfEnabled(this, clock_)),
+ encoder_settings_(absl::nullopt) {
+ TRACE_EVENT0(
+ "webrtc",
+ "VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager");
+ RTC_CHECK(degradation_preference_provider_);
+ RTC_CHECK(encoder_stats_observer_);
+}
+
+VideoStreamEncoderResourceManager::~VideoStreamEncoderResourceManager() =
+ default;
+
+void VideoStreamEncoderResourceManager::Initialize(
+ TaskQueueBase* encoder_queue) {
+ RTC_DCHECK(!encoder_queue_);
+ RTC_DCHECK(encoder_queue);
+ encoder_queue_ = encoder_queue;
+ encode_usage_resource_->RegisterEncoderTaskQueue(encoder_queue_);
+ quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_);
+ bandwidth_quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_);
+}
+
+void VideoStreamEncoderResourceManager::SetAdaptationProcessor(
+ ResourceAdaptationProcessorInterface* adaptation_processor,
+ VideoStreamAdapter* stream_adapter) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ adaptation_processor_ = adaptation_processor;
+ stream_adapter_ = stream_adapter;
+}
+
+void VideoStreamEncoderResourceManager::SetDegradationPreferences(
+ DegradationPreference degradation_preference) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ degradation_preference_ = degradation_preference;
+ UpdateStatsAdaptationSettings();
+}
+
+DegradationPreference
+VideoStreamEncoderResourceManager::degradation_preference() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ return degradation_preference_;
+}
+
+void VideoStreamEncoderResourceManager::ConfigureEncodeUsageResource() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK(encoder_settings_.has_value());
+ if (encode_usage_resource_->is_started()) {
+ encode_usage_resource_->StopCheckForOveruse();
+ } else {
+ // If the resource has not yet started then it needs to be added.
+ AddResource(encode_usage_resource_, VideoAdaptationReason::kCpu);
+ }
+ encode_usage_resource_->StartCheckForOveruse(GetCpuOveruseOptions());
+}
+
+void VideoStreamEncoderResourceManager::MaybeInitializePixelLimitResource() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK(adaptation_processor_);
+ RTC_DCHECK(!pixel_limit_resource_);
+ if (!pixel_limit_resource_experiment_enabled_) {
+ // The field trial is not running.
+ return;
+ }
+ int max_pixels = 0;
+ std::string pixel_limit_field_trial =
+ field_trials_.Lookup(kPixelLimitResourceFieldTrialName);
+ if (sscanf(pixel_limit_field_trial.c_str(), "Enabled-%d", &max_pixels) != 1) {
+ RTC_LOG(LS_ERROR) << "Couldn't parse " << kPixelLimitResourceFieldTrialName
+ << " trial config: " << pixel_limit_field_trial;
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Running field trial "
+ << kPixelLimitResourceFieldTrialName << " configured to "
+ << max_pixels << " max pixels";
+ // Configure the specified max pixels from the field trial. The pixel limit
+ // resource is active for the lifetime of the stream (until
+ // StopManagedResources() is called).
+ pixel_limit_resource_ =
+ PixelLimitResource::Create(encoder_queue_, input_state_provider_);
+ pixel_limit_resource_->SetMaxPixels(max_pixels);
+ AddResource(pixel_limit_resource_, VideoAdaptationReason::kCpu);
+}
+
+void VideoStreamEncoderResourceManager::StopManagedResources() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK(adaptation_processor_);
+ if (encode_usage_resource_->is_started()) {
+ encode_usage_resource_->StopCheckForOveruse();
+ RemoveResource(encode_usage_resource_);
+ }
+ if (quality_scaler_resource_->is_started()) {
+ quality_scaler_resource_->StopCheckForOveruse();
+ RemoveResource(quality_scaler_resource_);
+ }
+ if (pixel_limit_resource_) {
+ RemoveResource(pixel_limit_resource_);
+ pixel_limit_resource_ = nullptr;
+ }
+ if (bandwidth_quality_scaler_resource_->is_started()) {
+ bandwidth_quality_scaler_resource_->StopCheckForOveruse();
+ RemoveResource(bandwidth_quality_scaler_resource_);
+ }
+}
+
+void VideoStreamEncoderResourceManager::AddResource(
+ rtc::scoped_refptr<Resource> resource,
+ VideoAdaptationReason reason) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK(resource);
+ bool inserted;
+ std::tie(std::ignore, inserted) = resources_.emplace(resource, reason);
+ RTC_DCHECK(inserted) << "Resource " << resource->Name()
+ << " already was inserted";
+ adaptation_processor_->AddResource(resource);
+}
+
+void VideoStreamEncoderResourceManager::RemoveResource(
+ rtc::scoped_refptr<Resource> resource) {
+ {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK(resource);
+ const auto& it = resources_.find(resource);
+ RTC_DCHECK(it != resources_.end())
+ << "Resource \"" << resource->Name() << "\" not found.";
+ resources_.erase(it);
+ }
+ adaptation_processor_->RemoveResource(resource);
+}
+
+std::vector<AdaptationConstraint*>
+VideoStreamEncoderResourceManager::AdaptationConstraints() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ return {bitrate_constraint_.get(), balanced_constraint_.get()};
+}
+
+void VideoStreamEncoderResourceManager::SetEncoderSettings(
+ EncoderSettings encoder_settings) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ encoder_settings_ = std::move(encoder_settings);
+ bitrate_constraint_->OnEncoderSettingsUpdated(encoder_settings_);
+ initial_frame_dropper_->OnEncoderSettingsUpdated(
+ encoder_settings_->video_codec(), current_adaptation_counters_);
+ MaybeUpdateTargetFrameRate();
+ if (quality_rampup_experiment_) {
+ quality_rampup_experiment_->ConfigureQualityRampupExperiment(
+ initial_frame_dropper_->last_stream_configuration_changed(),
+ initial_frame_dropper_->single_active_stream_pixels(),
+ GetSingleActiveLayerMaxBitrate(encoder_settings_->video_codec()));
+ }
+}
+
+void VideoStreamEncoderResourceManager::SetStartBitrate(
+ DataRate start_bitrate) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ if (!start_bitrate.IsZero()) {
+ encoder_target_bitrate_bps_ = start_bitrate.bps();
+ bitrate_constraint_->OnEncoderTargetBitrateUpdated(
+ encoder_target_bitrate_bps_);
+ balanced_constraint_->OnEncoderTargetBitrateUpdated(
+ encoder_target_bitrate_bps_);
+ }
+ // The initial frame dropper stores and compares this timestamp in
+ // milliseconds (see SetTargetBitrate()), so pass milliseconds here.
+ initial_frame_dropper_->SetStartBitrate(start_bitrate,
+ clock_->TimeInMilliseconds());
+}
+
+void VideoStreamEncoderResourceManager::SetTargetBitrate(
+ DataRate target_bitrate) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ if (!target_bitrate.IsZero()) {
+ encoder_target_bitrate_bps_ = target_bitrate.bps();
+ bitrate_constraint_->OnEncoderTargetBitrateUpdated(
+ encoder_target_bitrate_bps_);
+ balanced_constraint_->OnEncoderTargetBitrateUpdated(
+ encoder_target_bitrate_bps_);
+ }
+ initial_frame_dropper_->SetTargetBitrate(target_bitrate,
+ clock_->TimeInMilliseconds());
+}
+
+void VideoStreamEncoderResourceManager::SetEncoderRates(
+ const VideoEncoder::RateControlParameters& encoder_rates) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ encoder_rates_ = encoder_rates;
+ initial_frame_dropper_->SetBandwidthAllocation(
+ encoder_rates.bandwidth_allocation);
+}
+
+void VideoStreamEncoderResourceManager::OnFrameDroppedDueToSize() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ initial_frame_dropper_->OnFrameDroppedDueToSize();
+ Adaptation reduce_resolution = stream_adapter_->GetAdaptDownResolution();
+ if (reduce_resolution.status() == Adaptation::Status::kValid) {
+ stream_adapter_->ApplyAdaptation(reduce_resolution,
+ quality_scaler_resource_);
+ }
+}
+
+void VideoStreamEncoderResourceManager::OnEncodeStarted(
+ const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ encode_usage_resource_->OnEncodeStarted(cropped_frame,
+ time_when_first_seen_us);
+}
+
+void VideoStreamEncoderResourceManager::OnEncodeCompleted(
+ const EncodedImage& encoded_image,
+ int64_t time_sent_in_us,
+ absl::optional<int> encode_duration_us,
+ DataSize frame_size) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ // Inform `encode_usage_resource_` of the encode completed event.
+ uint32_t timestamp = encoded_image.Timestamp();
+ int64_t capture_time_us =
+ encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;
+ encode_usage_resource_->OnEncodeCompleted(
+ timestamp, time_sent_in_us, capture_time_us, encode_duration_us);
+ quality_scaler_resource_->OnEncodeCompleted(encoded_image, time_sent_in_us);
+ bandwidth_quality_scaler_resource_->OnEncodeCompleted(
+ encoded_image, time_sent_in_us, frame_size.bytes());
+}
+
+void VideoStreamEncoderResourceManager::OnFrameDropped(
+ EncodedImageCallback::DropReason reason) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ quality_scaler_resource_->OnFrameDropped(reason);
+}
+
+bool VideoStreamEncoderResourceManager::DropInitialFrames() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ return initial_frame_dropper_->DropInitialFrames();
+}
+
+absl::optional<uint32_t>
+VideoStreamEncoderResourceManager::SingleActiveStreamPixels() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ return initial_frame_dropper_->single_active_stream_pixels();
+}
+
+absl::optional<uint32_t>
+VideoStreamEncoderResourceManager::UseBandwidthAllocationBps() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ return initial_frame_dropper_->UseBandwidthAllocationBps();
+}
+
+void VideoStreamEncoderResourceManager::OnMaybeEncodeFrame() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ initial_frame_dropper_->Disable();
+ if (quality_rampup_experiment_ && quality_scaler_resource_->is_started()) {
+ DataRate bandwidth = encoder_rates_.has_value()
+ ? encoder_rates_->bandwidth_allocation
+ : DataRate::Zero();
+ quality_rampup_experiment_->PerformQualityRampupExperiment(
+ quality_scaler_resource_, bandwidth,
+ DataRate::BitsPerSec(encoder_target_bitrate_bps_.value_or(0)),
+ GetSingleActiveLayerMaxBitrate(encoder_settings_->video_codec()));
+ }
+}
+
+void VideoStreamEncoderResourceManager::UpdateQualityScalerSettings(
+ absl::optional<VideoEncoder::QpThresholds> qp_thresholds) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ if (qp_thresholds.has_value()) {
+ if (quality_scaler_resource_->is_started()) {
+ quality_scaler_resource_->SetQpThresholds(qp_thresholds.value());
+ } else {
+ quality_scaler_resource_->StartCheckForOveruse(qp_thresholds.value());
+ AddResource(quality_scaler_resource_, VideoAdaptationReason::kQuality);
+ }
+ } else if (quality_scaler_resource_->is_started()) {
+ quality_scaler_resource_->StopCheckForOveruse();
+ RemoveResource(quality_scaler_resource_);
+ }
+ initial_frame_dropper_->OnQualityScalerSettingsUpdated();
+}
+
+void VideoStreamEncoderResourceManager::UpdateBandwidthQualityScalerSettings(
+ bool bandwidth_quality_scaling_allowed,
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+
+ if (!bandwidth_quality_scaling_allowed) {
+ if (bandwidth_quality_scaler_resource_->is_started()) {
+ bandwidth_quality_scaler_resource_->StopCheckForOveruse();
+ RemoveResource(bandwidth_quality_scaler_resource_);
+ }
+ } else {
+ if (!bandwidth_quality_scaler_resource_->is_started()) {
+ // AddResource() must be called before StartCheckForOveruse() so that
+ // the resource has a valid listener when the check starts.
+ AddResource(bandwidth_quality_scaler_resource_,
+ webrtc::VideoAdaptationReason::kQuality);
+ bandwidth_quality_scaler_resource_->StartCheckForOveruse(
+ resolution_bitrate_limits);
+ }
+ }
+}
+
+void VideoStreamEncoderResourceManager::ConfigureQualityScaler(
+ const VideoEncoder::EncoderInfo& encoder_info) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ const auto scaling_settings = encoder_info.scaling_settings;
+ const bool quality_scaling_allowed =
+ IsResolutionScalingEnabled(degradation_preference_) &&
+ (scaling_settings.thresholds.has_value() ||
+ (encoder_settings_.has_value() &&
+ encoder_settings_->encoder_config().is_quality_scaling_allowed)) &&
+ encoder_info.is_qp_trusted.value_or(true);
+
+ // TODO(https://crbug.com/webrtc/11222): Should this move to
+ // QualityScalerResource?
+ if (quality_scaling_allowed) {
+ if (!quality_scaler_resource_->is_started()) {
+ // The quality scaler has not yet been configured.
+
+ // Use experimental thresholds if available.
+ absl::optional<VideoEncoder::QpThresholds> experimental_thresholds;
+ if (quality_scaling_experiment_enabled_) {
+ experimental_thresholds = QualityScalingExperiment::GetQpThresholds(
+ GetVideoCodecTypeOrGeneric(encoder_settings_));
+ }
+ UpdateQualityScalerSettings(experimental_thresholds.has_value()
+ ? experimental_thresholds
+ : scaling_settings.thresholds);
+ }
+ } else {
+ UpdateQualityScalerSettings(absl::nullopt);
+ }
+
+ // Set the qp-thresholds to the balanced settings if balanced mode.
+ if (degradation_preference_ == DegradationPreference::BALANCED &&
+ quality_scaler_resource_->is_started()) {
+ absl::optional<VideoEncoder::QpThresholds> thresholds =
+ balanced_settings_.GetQpThresholds(
+ GetVideoCodecTypeOrGeneric(encoder_settings_),
+ LastFrameSizeOrDefault());
+ if (thresholds) {
+ quality_scaler_resource_->SetQpThresholds(*thresholds);
+ }
+ }
+ UpdateStatsAdaptationSettings();
+}
+
+void VideoStreamEncoderResourceManager::ConfigureBandwidthQualityScaler(
+ const VideoEncoder::EncoderInfo& encoder_info) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ const bool bandwidth_quality_scaling_allowed =
+ IsResolutionScalingEnabled(degradation_preference_) &&
+ (encoder_settings_.has_value() &&
+ encoder_settings_->encoder_config().is_quality_scaling_allowed) &&
+ !encoder_info.is_qp_trusted.value_or(true);
+
+ UpdateBandwidthQualityScalerSettings(bandwidth_quality_scaling_allowed,
+ encoder_info.resolution_bitrate_limits);
+ UpdateStatsAdaptationSettings();
+}
+
+VideoAdaptationReason VideoStreamEncoderResourceManager::GetReasonFromResource(
+ rtc::scoped_refptr<Resource> resource) const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ const auto& registered_resource = resources_.find(resource);
+ RTC_DCHECK(registered_resource != resources_.end())
+ << resource->Name() << " not found.";
+ return registered_resource->second;
+}
+
+// TODO(pbos): Lower these thresholds (to closer to 100%) when we handle
+// pipelining encoders better (multiple input frames before something comes
+// out). This should effectively turn off CPU adaptations for systems that
+// remotely cope with the load right now.
+CpuOveruseOptions VideoStreamEncoderResourceManager::GetCpuOveruseOptions()
+ const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ // This is already ensured by the only caller of this method:
+ // ConfigureEncodeUsageResource().
+ RTC_DCHECK(encoder_settings_.has_value());
+ CpuOveruseOptions options(field_trials_);
+ // Hardware accelerated encoders are assumed to be pipelined; give them
+ // additional overuse time.
+ if (encoder_settings_->encoder_info().is_hardware_accelerated) {
+ options.low_encode_usage_threshold_percent = 150;
+ options.high_encode_usage_threshold_percent = 200;
+ }
+ if (experiment_cpu_load_estimator_) {
+ options.filter_time_ms = 5 * rtc::kNumMillisecsPerSec;
+ }
+ return options;
+}
+
+int VideoStreamEncoderResourceManager::LastFrameSizeOrDefault() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ return input_state_provider_->InputState()
+ .single_active_stream_pixels()
+ .value_or(
+ input_state_provider_->InputState().frame_size_pixels().value_or(
+ kDefaultInputPixelsWidth * kDefaultInputPixelsHeight));
+}
+
+void VideoStreamEncoderResourceManager::OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ current_adaptation_counters_ = adaptation_counters;
+
+ // TODO(bugs.webrtc.org/11553) Remove reason parameter and add reset callback.
+ if (!reason && adaptation_counters.Total() == 0) {
+ // Adaptation was manually reset - clear the per-reason counters too.
+ encoder_stats_observer_->ClearAdaptationStats();
+ }
+
+ video_source_restrictions_ = FilterRestrictionsByDegradationPreference(
+ restrictions, degradation_preference_);
+ MaybeUpdateTargetFrameRate();
+}
+
+void VideoStreamEncoderResourceManager::OnResourceLimitationChanged(
+ rtc::scoped_refptr<Resource> resource,
+ const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
+ resource_limitations) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ if (!resource) {
+ encoder_stats_observer_->ClearAdaptationStats();
+ return;
+ }
+
+ std::map<VideoAdaptationReason, VideoAdaptationCounters> limitations;
+ for (auto& resource_counter : resource_limitations) {
+ std::map<VideoAdaptationReason, VideoAdaptationCounters>::iterator it;
+ bool inserted;
+ std::tie(it, inserted) = limitations.emplace(
+ GetReasonFromResource(resource_counter.first), resource_counter.second);
+ if (!inserted && it->second.Total() < resource_counter.second.Total()) {
+ it->second = resource_counter.second;
+ }
+ }
+
+ VideoAdaptationReason adaptation_reason = GetReasonFromResource(resource);
+ encoder_stats_observer_->OnAdaptationChanged(
+ adaptation_reason, limitations[VideoAdaptationReason::kCpu],
+ limitations[VideoAdaptationReason::kQuality]);
+
+ if (quality_rampup_experiment_) {
+ bool cpu_limited = limitations.at(VideoAdaptationReason::kCpu).Total() > 0;
+ auto qp_resolution_adaptations =
+ limitations.at(VideoAdaptationReason::kQuality).resolution_adaptations;
+ quality_rampup_experiment_->cpu_adapted(cpu_limited);
+ quality_rampup_experiment_->qp_resolution_adaptations(
+ qp_resolution_adaptations);
+ }
+
+ RTC_LOG(LS_INFO) << ActiveCountsToString(limitations);
+}
+
+void VideoStreamEncoderResourceManager::MaybeUpdateTargetFrameRate() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ absl::optional<double> codec_max_frame_rate =
+ encoder_settings_.has_value()
+ ? absl::optional<double>(
+ encoder_settings_->video_codec().maxFramerate)
+ : absl::nullopt;
+ // The current target framerate is the maximum frame rate as specified by
+ // the current codec configuration or any limit imposed by the adaptation
+ // module. This is used to make sure overuse detection doesn't needlessly
+ // trigger in low and/or variable framerate scenarios.
+ absl::optional<double> target_frame_rate =
+ video_source_restrictions_.max_frame_rate();
+ if (!target_frame_rate.has_value() ||
+ (codec_max_frame_rate.has_value() &&
+ codec_max_frame_rate.value() < target_frame_rate.value())) {
+ target_frame_rate = codec_max_frame_rate;
+ }
+ encode_usage_resource_->SetTargetFrameRate(target_frame_rate);
+}
+
+void VideoStreamEncoderResourceManager::UpdateStatsAdaptationSettings() const {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ VideoStreamEncoderObserver::AdaptationSettings cpu_settings(
+ IsResolutionScalingEnabled(degradation_preference_),
+ IsFramerateScalingEnabled(degradation_preference_));
+
+ VideoStreamEncoderObserver::AdaptationSettings quality_settings =
+ (quality_scaler_resource_->is_started() ||
+ bandwidth_quality_scaler_resource_->is_started())
+ ? cpu_settings
+ : VideoStreamEncoderObserver::AdaptationSettings();
+ encoder_stats_observer_->UpdateAdaptationSettings(cpu_settings,
+ quality_settings);
+}
+
+// static
+std::string VideoStreamEncoderResourceManager::ActiveCountsToString(
+ const std::map<VideoAdaptationReason, VideoAdaptationCounters>&
+ active_counts) {
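+ // Builds a string like (note: no separator between reasons inside braces):
+ //   "Downgrade counts: fps: {quality:0cpu:1}, resolution {quality:2cpu:0}"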
+ rtc::StringBuilder ss;
+
+ ss << "Downgrade counts: fps: {";
+ for (auto& reason_count : active_counts) {
+ ss << ToString(reason_count.first) << ":";
+ ss << reason_count.second.fps_adaptations;
+ }
+ ss << "}, resolution {";
+ for (auto& reason_count : active_counts) {
+ ss << ToString(reason_count.first) << ":";
+ ss << reason_count.second.resolution_adaptations;
+ }
+ ss << "}";
+
+ return ss.Release();
+}
+
+void VideoStreamEncoderResourceManager::OnQualityRampUp() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ stream_adapter_->ClearRestrictions();
+ quality_rampup_experiment_.reset();
+}
+
+bool VideoStreamEncoderResourceManager::IsSimulcastOrMultipleSpatialLayers(
+ const VideoEncoderConfig& encoder_config) {
+ const std::vector<VideoStream>& simulcast_layers =
+ encoder_config.simulcast_layers;
+ if (simulcast_layers.empty()) {
+ return false;
+ }
+
+ absl::optional<int> num_spatial_layers;
+ if (simulcast_layers[0].scalability_mode.has_value() &&
+ encoder_config.number_of_streams == 1) {
+ num_spatial_layers = ScalabilityModeToNumSpatialLayers(
+ *simulcast_layers[0].scalability_mode);
+ }
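+ // E.g. scalability mode "L1T3" maps to 1 spatial layer, "L3T3" to 3.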
+
+ if (simulcast_layers.size() == 1) {
+ // Check if multiple spatial layers are used.
+ return num_spatial_layers && *num_spatial_layers > 1;
+ }
+
+ bool svc_with_one_spatial_layer =
+ num_spatial_layers && *num_spatial_layers == 1;
+ if (simulcast_layers[0].active && !svc_with_one_spatial_layer) {
+ // We can't distinguish between simulcast and singlecast when only the
+ // lowest spatial layer is active. Treat this case as simulcast.
+ return true;
+ }
+
+ int num_active_layers =
+ std::count_if(simulcast_layers.begin(), simulcast_layers.end(),
+ [](const VideoStream& layer) { return layer.active; });
+ return num_active_layers > 1;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.h b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.h
new file mode 100644
index 0000000000..e0de3f7d19
--- /dev/null
+++ b/third_party/libwebrtc/video/adaptation/video_stream_encoder_resource_manager.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_MANAGER_H_
+#define VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_MANAGER_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_parameters.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_adaptation_counters.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_source_interface.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "rtc_base/experiments/quality_scaler_settings.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/adaptation/balanced_constraint.h"
+#include "video/adaptation/bandwidth_quality_scaler_resource.h"
+#include "video/adaptation/bitrate_constraint.h"
+#include "video/adaptation/encode_usage_resource.h"
+#include "video/adaptation/overuse_frame_detector.h"
+#include "video/adaptation/pixel_limit_resource.h"
+#include "video/adaptation/quality_rampup_experiment_helper.h"
+#include "video/adaptation/quality_scaler_resource.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
+#include "video/config/video_encoder_config.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+// The assumed input frame size if we have not yet received a frame.
+// TODO(hbos): This is 144p - why are we assuming super low quality? Seems like
+// a bad heuristic.
+extern const int kDefaultInputPixelsWidth;
+extern const int kDefaultInputPixelsHeight;
+
+// Owns adaptation-related Resources pertaining to a single VideoStreamEncoder
+// and passes on the relevant input from the encoder to the resources. The
+// resources provide resource usage states to the ResourceAdaptationProcessor
+// which is responsible for reconfiguring streams in order not to overuse
+// resources.
+//
+// The manager is also involved with various mitigations not part of the
+// ResourceAdaptationProcessor code such as the initial frame dropping.
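+//
+// A rough call-order sketch inferred from the methods below (illustrative
+// only; `queue`, `processor`, `adapter` and `settings` are assumed to be
+// provided by the embedding VideoStreamEncoder):
+//
+//   VideoStreamEncoderResourceManager manager(/*...*/);
+//   manager.Initialize(queue);
+//   manager.SetAdaptationProcessor(processor, adapter);
+//   manager.SetEncoderSettings(settings);
+//   manager.ConfigureEncodeUsageResource();
+//   // per frame: OnEncodeStarted(), OnEncodeCompleted(), OnFrameDropped()...
+//   manager.StopManagedResources();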
+class VideoStreamEncoderResourceManager
+ : public VideoSourceRestrictionsListener,
+ public ResourceLimitationsListener,
+ public QualityRampUpExperimentListener {
+ public:
+ VideoStreamEncoderResourceManager(
+ VideoStreamInputStateProvider* input_state_provider,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ Clock* clock,
+ bool experiment_cpu_load_estimator,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector,
+ DegradationPreferenceProvider* degradation_preference_provider,
+ const FieldTrialsView& field_trials);
+ ~VideoStreamEncoderResourceManager() override;
+
+ void Initialize(TaskQueueBase* encoder_queue);
+ void SetAdaptationProcessor(
+ ResourceAdaptationProcessorInterface* adaptation_processor,
+ VideoStreamAdapter* stream_adapter);
+
+ // TODO(https://crbug.com/webrtc/11563): The degradation preference is a
+ // setting of the Processor, it does not belong to the Manager - can we get
+ // rid of this?
+ void SetDegradationPreferences(DegradationPreference degradation_preference);
+ DegradationPreference degradation_preference() const;
+
+ void ConfigureEncodeUsageResource();
+ // Initializes the pixel limit resource if the "WebRTC-PixelLimitResource"
+ // field trial is enabled. This can be used for testing.
+ void MaybeInitializePixelLimitResource();
+ // Stops the encode usage and quality scaler resources if not already stopped.
+ // If the pixel limit resource was created, it is also stopped and nulled.
+ void StopManagedResources();
+
+ // Settings that affect the VideoStreamEncoder-specific resources.
+ void SetEncoderSettings(EncoderSettings encoder_settings);
+ void SetStartBitrate(DataRate start_bitrate);
+ void SetTargetBitrate(DataRate target_bitrate);
+ void SetEncoderRates(
+ const VideoEncoder::RateControlParameters& encoder_rates);
+ // TODO(https://crbug.com/webrtc/11338): This can be made private if we
+ // configure on SetDegradationPreferences and SetEncoderSettings.
+ void ConfigureQualityScaler(const VideoEncoder::EncoderInfo& encoder_info);
+ void ConfigureBandwidthQualityScaler(
+ const VideoEncoder::EncoderInfo& encoder_info);
+
+ // Methods corresponding to different points in the encoding pipeline.
+ void OnFrameDroppedDueToSize();
+ void OnMaybeEncodeFrame();
+ void OnEncodeStarted(const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us);
+ void OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us,
+ absl::optional<int> encode_duration_us,
+ DataSize frame_size);
+ void OnFrameDropped(EncodedImageCallback::DropReason reason);
+
+ // Resources need to be mapped to an AdaptReason (kCpu or kQuality) in order
+ // to update legacy getStats().
+ void AddResource(rtc::scoped_refptr<Resource> resource,
+ VideoAdaptationReason reason);
+ void RemoveResource(rtc::scoped_refptr<Resource> resource);
+ std::vector<AdaptationConstraint*> AdaptationConstraints() const;
+ // If true, the VideoStreamEncoder should execute its logic to maybe drop
+ // frames based on size and bitrate.
+ bool DropInitialFrames() const;
+ absl::optional<uint32_t> SingleActiveStreamPixels() const;
+ absl::optional<uint32_t> UseBandwidthAllocationBps() const;
+
+ // VideoSourceRestrictionsListener implementation.
+ // Updates `video_source_restrictions_`.
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) override;
+ void OnResourceLimitationChanged(
+ rtc::scoped_refptr<Resource> resource,
+ const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
+ resource_limitations) override;
+
+ // QualityRampUpExperimentListener implementation.
+ void OnQualityRampUp() override;
+
+ static bool IsSimulcastOrMultipleSpatialLayers(
+ const VideoEncoderConfig& encoder_config);
+
+ private:
+ class InitialFrameDropper;
+
+ VideoAdaptationReason GetReasonFromResource(
+ rtc::scoped_refptr<Resource> resource) const;
+
+ CpuOveruseOptions GetCpuOveruseOptions() const;
+ int LastFrameSizeOrDefault() const;
+
+ // Calculates an up-to-date value of the target frame rate and informs the
+ // `encode_usage_resource_` of the new value.
+ void MaybeUpdateTargetFrameRate();
+
+ // Use nullopt to disable quality scaling.
+ void UpdateQualityScalerSettings(
+ absl::optional<VideoEncoder::QpThresholds> qp_thresholds);
+
+ void UpdateBandwidthQualityScalerSettings(
+ bool bandwidth_quality_scaling_allowed,
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>&
+ resolution_bitrate_limits);
+
+ void UpdateStatsAdaptationSettings() const;
+
+ static std::string ActiveCountsToString(
+ const std::map<VideoAdaptationReason, VideoAdaptationCounters>&
+ active_counts);
+
+ const FieldTrialsView& field_trials_;
+ DegradationPreferenceProvider* const degradation_preference_provider_;
+ std::unique_ptr<BitrateConstraint> bitrate_constraint_
+ RTC_GUARDED_BY(encoder_queue_);
+ const std::unique_ptr<BalancedConstraint> balanced_constraint_
+ RTC_GUARDED_BY(encoder_queue_);
+ const rtc::scoped_refptr<EncodeUsageResource> encode_usage_resource_;
+ const rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource_;
+ rtc::scoped_refptr<PixelLimitResource> pixel_limit_resource_;
+ const rtc::scoped_refptr<BandwidthQualityScalerResource>
+ bandwidth_quality_scaler_resource_;
+
+ TaskQueueBase* encoder_queue_;
+ VideoStreamInputStateProvider* const input_state_provider_
+ RTC_GUARDED_BY(encoder_queue_);
+ ResourceAdaptationProcessorInterface* adaptation_processor_;
+ VideoStreamAdapter* stream_adapter_ RTC_GUARDED_BY(encoder_queue_);
+ // Thread-safe.
+ VideoStreamEncoderObserver* const encoder_stats_observer_;
+
+ DegradationPreference degradation_preference_ RTC_GUARDED_BY(encoder_queue_);
+ VideoSourceRestrictions video_source_restrictions_
+ RTC_GUARDED_BY(encoder_queue_);
+
+ VideoAdaptationCounters current_adaptation_counters_
+ RTC_GUARDED_BY(encoder_queue_);
+
+ const BalancedDegradationSettings balanced_settings_;
+ Clock* clock_ RTC_GUARDED_BY(encoder_queue_);
+ const bool experiment_cpu_load_estimator_ RTC_GUARDED_BY(encoder_queue_);
+ const std::unique_ptr<InitialFrameDropper> initial_frame_dropper_
+ RTC_GUARDED_BY(encoder_queue_);
+ const bool quality_scaling_experiment_enabled_ RTC_GUARDED_BY(encoder_queue_);
+ const bool pixel_limit_resource_experiment_enabled_
+ RTC_GUARDED_BY(encoder_queue_);
+ absl::optional<uint32_t> encoder_target_bitrate_bps_
+ RTC_GUARDED_BY(encoder_queue_);
+ absl::optional<VideoEncoder::RateControlParameters> encoder_rates_
+ RTC_GUARDED_BY(encoder_queue_);
+ std::unique_ptr<QualityRampUpExperimentHelper> quality_rampup_experiment_
+ RTC_GUARDED_BY(encoder_queue_);
+ absl::optional<EncoderSettings> encoder_settings_
+ RTC_GUARDED_BY(encoder_queue_);
+
+ // Ties a resource to a reason for statistical reporting. This AdaptReason is
+ // also used by this module to make decisions about how to adapt up/down.
+ std::map<rtc::scoped_refptr<Resource>, VideoAdaptationReason> resources_
+ RTC_GUARDED_BY(encoder_queue_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_MANAGER_H_
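For orientation, here is a minimal, self-contained sketch of the resource-to-reason bookkeeping that `AddResource()` / `GetReasonFromResource()` describe above. The types are simplified stand-ins, not the WebRTC API; the real ones live in api/adaptation/resource.h and api/video/video_adaptation_reason.h.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

enum class VideoAdaptationReason { kQuality, kCpu };

struct Resource {
  std::string name;
};

int main() {
  auto encode_usage = std::make_shared<Resource>(Resource{"EncodeUsage"});
  auto quality_scaler = std::make_shared<Resource>(Resource{"QualityScaler"});

  // Mirrors AddResource(resource, reason): each resource is tied to the
  // AdaptReason used by legacy getStats().
  std::map<std::shared_ptr<Resource>, VideoAdaptationReason> resources;
  resources.emplace(encode_usage, VideoAdaptationReason::kCpu);
  resources.emplace(quality_scaler, VideoAdaptationReason::kQuality);

  // Mirrors GetReasonFromResource(resource).
  auto it = resources.find(quality_scaler);
  std::cout << (it->second == VideoAdaptationReason::kQuality ? "quality"
                                                              : "cpu")
            << "\n";  // prints "quality"
}
```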
diff --git a/third_party/libwebrtc/video/alignment_adjuster.cc b/third_party/libwebrtc/video/alignment_adjuster.cc
new file mode 100644
index 0000000000..1762bec4cf
--- /dev/null
+++ b/third_party/libwebrtc/video/alignment_adjuster.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/alignment_adjuster.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+// Round each scale factor to the closest rational of the form alignment/i,
+// where i is a multiple of `requested_alignment`. Each resolution divisible by
+// `alignment` will be divisible by `requested_alignment` after the scale
+// factor is applied.
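+//
+// Example: with alignment = 12 and requested_alignment = 2, the candidate
+// scale factors are 12/i for i in {2, 4, 6, 8, 10, 12}; a layer scale of 2.9
+// snaps to 12/4 = 3.0 and 2.1 snaps to 12/6 = 2.0 (cf. the unit tests).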
+double RoundToMultiple(int alignment,
+ int requested_alignment,
+ VideoEncoderConfig* config,
+ bool update_config) {
+ double diff = 0.0;
+ for (auto& layer : config->simulcast_layers) {
+ double min_dist = std::numeric_limits<double>::max();
+ double new_scale = 1.0;
+ for (int i = requested_alignment; i <= alignment;
+ i += requested_alignment) {
+ double dist = std::abs(layer.scale_resolution_down_by -
+ alignment / static_cast<double>(i));
+ if (dist <= min_dist) {
+ min_dist = dist;
+ new_scale = alignment / static_cast<double>(i);
+ }
+ }
+ diff += std::abs(layer.scale_resolution_down_by - new_scale);
+ if (update_config) {
+ RTC_LOG(LS_INFO) << "scale_resolution_down_by "
+ << layer.scale_resolution_down_by << " -> " << new_scale;
+ layer.scale_resolution_down_by = new_scale;
+ }
+ }
+ return diff;
+}
+} // namespace
+
+// Input: encoder_info.requested_resolution_alignment (K)
+// Input: encoder_info.apply_alignment_to_all_simulcast_layers (B)
+// Input: vector config->simulcast_layers.scale_resolution_down_by (S[i])
+// Output:
+// If B is false, returns K and does not adjust scaling factors.
+// Otherwise, returns the adjusted alignment (A); the adjusted scaling factors
+// (S'[i]) are written to `config` such that:
+//
+// A / S'[i] are integers divisible by K
+// sum abs(S'[i] - S[i]) -> min
+// A integer <= 16
+//
+// The solution chooses the closest S'[i] of the form A / j, where j is a
+// multiple of K.
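+//
+// Worked example: with K = 2, B = true and the default downscaling factors
+// for three layers (S = {1.0, 2.0, 4.0}), the code below returns
+// A = K * 2^(num_layers - 1) = 8, and the per-layer values A / S[i] =
+// {8, 4, 2} are all divisible by K.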
+
+int AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ const VideoEncoder::EncoderInfo& encoder_info,
+ VideoEncoderConfig* config,
+ absl::optional<size_t> max_layers) {
+ const int requested_alignment = encoder_info.requested_resolution_alignment;
+ if (!encoder_info.apply_alignment_to_all_simulcast_layers) {
+ return requested_alignment;
+ }
+
+ if (requested_alignment < 1 || config->number_of_streams <= 1 ||
+ config->simulcast_layers.size() <= 1) {
+ return requested_alignment;
+ }
+
+ // Update alignment to also apply to simulcast layers.
+ const bool has_scale_resolution_down_by = absl::c_any_of(
+ config->simulcast_layers, [](const webrtc::VideoStream& layer) {
+ return layer.scale_resolution_down_by >= 1.0;
+ });
+
+ if (!has_scale_resolution_down_by) {
+ // Default resolution downscaling used (scale factors: 1, 2, 4, ...).
+ size_t size = config->simulcast_layers.size();
+ if (max_layers && *max_layers > 0 && *max_layers < size) {
+ size = *max_layers;
+ }
+ return requested_alignment * (1 << (size - 1));
+ }
+
+ // Get alignment for downscaled layers.
+ // Adjust `scale_resolution_down_by` to a common multiple to limit the
+ // alignment value (avoiding heavily cropped frames and aspect ratios far
+ // from the original).
+ const int kMaxAlignment = 16;
+
+ for (auto& layer : config->simulcast_layers) {
+ layer.scale_resolution_down_by =
+ std::max(layer.scale_resolution_down_by, 1.0);
+ layer.scale_resolution_down_by =
+ std::min(layer.scale_resolution_down_by, 10000.0);
+ }
+
+ // Decide on common multiple to use.
+ double min_diff = std::numeric_limits<double>::max();
+ int best_alignment = 1;
+ for (int alignment = requested_alignment; alignment <= kMaxAlignment;
+ ++alignment) {
+ double diff = RoundToMultiple(alignment, requested_alignment, config,
+ /*update_config=*/false);
+ if (diff < min_diff) {
+ min_diff = diff;
+ best_alignment = alignment;
+ }
+ }
+ RoundToMultiple(best_alignment, requested_alignment, config,
+ /*update_config=*/true);
+
+ return std::max(best_alignment, requested_alignment);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/alignment_adjuster.h b/third_party/libwebrtc/video/alignment_adjuster.h
new file mode 100644
index 0000000000..36ac062e91
--- /dev/null
+++ b/third_party/libwebrtc/video/alignment_adjuster.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ALIGNMENT_ADJUSTER_H_
+#define VIDEO_ALIGNMENT_ADJUSTER_H_
+
+#include "api/video_codecs/video_encoder.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+class AlignmentAdjuster {
+ public:
+ // Returns the resolution alignment requested by the encoder (i.e.,
+ // `EncoderInfo::requested_resolution_alignment`, which ensures that frames
+ // delivered to the encoder are divisible by this alignment).
+ //
+ // If `EncoderInfo::apply_alignment_to_all_simulcast_layers` is enabled, the
+ // alignment will be adjusted to ensure that each simulcast layer also is
+ // divisible by `requested_resolution_alignment`. The configured scale factors
+ // `scale_resolution_down_by` may be adjusted to a common multiple to limit
+ // the alignment value, avoiding heavily cropped frames and aspect ratios far
+ // from the original.
+ //
+ // Note: `max_layers` is currently only taken into account when the default
+ // scale factors are used.
+ static int GetAlignmentAndMaybeAdjustScaleFactors(
+ const VideoEncoder::EncoderInfo& info,
+ VideoEncoderConfig* config,
+ absl::optional<size_t> max_layers);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ALIGNMENT_ADJUSTER_H_
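A usage sketch, modeled on the unit tests that follow; `MakeSimulcastConfig()` is a hypothetical helper standing in for however the caller builds its `VideoEncoderConfig` (the tests use `test::FillEncoderConfiguration`):

```cpp
// Hypothetical helper: builds a config whose simulcast_layers carry
// scale_resolution_down_by values.
VideoEncoderConfig config = MakeSimulcastConfig();

VideoEncoder::EncoderInfo info;
info.requested_resolution_alignment = 2;
info.apply_alignment_to_all_simulcast_layers = true;

// May nudge config.simulcast_layers[i].scale_resolution_down_by to nearby
// rationals so that every layer resolution stays divisible by the returned
// alignment.
int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
    info, &config, /*max_layers=*/absl::nullopt);
```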
diff --git a/third_party/libwebrtc/video/alignment_adjuster_unittest.cc b/third_party/libwebrtc/video/alignment_adjuster_unittest.cc
new file mode 100644
index 0000000000..28e4bc0550
--- /dev/null
+++ b/third_party/libwebrtc/video/alignment_adjuster_unittest.cc
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/alignment_adjuster.h"
+
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/encoder_settings.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+VideoEncoder::EncoderInfo GetEncoderInfo(int alignment, bool apply) {
+ VideoEncoder::EncoderInfo info;
+ info.requested_resolution_alignment = alignment;
+ info.apply_alignment_to_all_simulcast_layers = apply;
+ return info;
+}
+} // namespace
+
+class AlignmentAdjusterTest
+ : public ::testing::TestWithParam<::testing::tuple<
+ int,
+ std::tuple<std::vector<double>, std::vector<double>, int>>> {
+ protected:
+ AlignmentAdjusterTest()
+ : kRequestedAlignment(std::get<0>(GetParam())),
+ kScaleFactors(std::get<0>(std::get<1>(GetParam()))),
+ kAdjustedScaleFactors(std::get<1>(std::get<1>(GetParam()))),
+ kAdjustedAlignment(std::get<2>(std::get<1>(GetParam()))) {}
+
+ const int kRequestedAlignment;
+ const std::vector<double> kScaleFactors;
+ const std::vector<double> kAdjustedScaleFactors;
+ const int kAdjustedAlignment;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ ScaleFactorsAndAlignment,
+ AlignmentAdjusterTest,
+ ::testing::Combine(
+ ::testing::Values(2), // kRequestedAlignment
+ ::testing::Values(
+ std::make_tuple(std::vector<double>{-1.0}, // kScaleFactors
+ std::vector<double>{-1.0}, // kAdjustedScaleFactors
+ 2), // default: {1.0} // kAdjustedAlignment
+ std::make_tuple(std::vector<double>{-1.0, -1.0},
+ std::vector<double>{-1.0, -1.0},
+ 4), // default: {1.0, 2.0}
+ std::make_tuple(std::vector<double>{-1.0, -1.0, -1.0},
+ std::vector<double>{-1.0, -1.0, -1.0},
+ 8), // default: {1.0, 2.0, 4.0}
+ std::make_tuple(std::vector<double>{1.0, 2.0, 4.0},
+ std::vector<double>{1.0, 2.0, 4.0},
+ 8),
+ std::make_tuple(std::vector<double>{9999.0, -1.0, 1.0},
+ std::vector<double>{8.0, 1.0, 1.0},
+ 16), // kMaxAlignment
+ std::make_tuple(std::vector<double>{3.99, 2.01, 1.0},
+ std::vector<double>{4.0, 2.0, 1.0},
+ 8),
+ std::make_tuple(std::vector<double>{2.9, 2.1},
+ std::vector<double>{6.0 / 2.0, 6.0 / 3.0},
+ 12),
+ std::make_tuple(std::vector<double>{4.9, 1.7, 1.2},
+ std::vector<double>{5.0, 5.0 / 3.0, 5.0 / 4.0},
+ 10),
+ std::make_tuple(std::vector<double>{1.0, 1.3},
+ std::vector<double>{4.0 / 4.0, 4.0 / 3.0},
+ 8),
+ std::make_tuple(std::vector<double>{1.75, 3.5},
+ std::vector<double>{7.0 / 4.0, 7.0 / 2.0},
+ 7),
+ std::make_tuple(std::vector<double>{1.5, 2.5},
+ std::vector<double>{1.5, 2.5},
+ 15))));
+
+class AlignmentAdjusterTestTwoLayers : public AlignmentAdjusterTest {
+ protected:
+ const int kMaxLayers = 2;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ ScaleFactorsAndAlignmentWithMaxLayers,
+ AlignmentAdjusterTestTwoLayers,
+ ::testing::Combine(
+ ::testing::Values(2), // kRequestedAlignment
+ ::testing::Values(
+ std::make_tuple(std::vector<double>{-1.0}, // kScaleFactors
+ std::vector<double>{-1.0}, // kAdjustedScaleFactors
+ 2), // default: {1.0} // kAdjustedAlignment
+ std::make_tuple(std::vector<double>{-1.0, -1.0},
+ std::vector<double>{-1.0, -1.0},
+ 4), // default: {1.0, 2.0}
+ std::make_tuple(std::vector<double>{-1.0, -1.0, -1.0},
+ std::vector<double>{-1.0, -1.0, -1.0},
+ 4), // default: {1.0, 2.0, 4.0}
+ std::make_tuple(std::vector<double>{1.0, 2.0, 4.0},
+ std::vector<double>{1.0, 2.0, 4.0},
+ 8))));
+
+TEST_P(AlignmentAdjusterTest, AlignmentAppliedToAllLayers) {
+ const bool kApplyAlignmentToAllLayers = true;
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = kScaleFactors.size();
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i];
+ }
+
+ // Verify requested alignment from sink.
+ VideoEncoder::EncoderInfo info =
+ GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers);
+ int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ info, &config, absl::nullopt);
+ EXPECT_EQ(alignment, kAdjustedAlignment);
+
+ // Verify adjusted scale factors.
+ for (int i = 0; i < num_streams; ++i) {
+ EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by,
+ kAdjustedScaleFactors[i]);
+ }
+}
+
+TEST_P(AlignmentAdjusterTest, AlignmentNotAppliedToAllLayers) {
+ const bool kApplyAlignmentToAllLayers = false;
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = kScaleFactors.size();
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i];
+ }
+
+ // Verify requested alignment from sink, alignment is not adjusted.
+ VideoEncoder::EncoderInfo info =
+ GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers);
+ int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ info, &config, absl::nullopt);
+ EXPECT_EQ(alignment, kRequestedAlignment);
+
+ // Verify that scale factors are not adjusted.
+ for (int i = 0; i < num_streams; ++i) {
+ EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by,
+ kScaleFactors[i]);
+ }
+}
+
+TEST_P(AlignmentAdjusterTestTwoLayers, AlignmentAppliedToAllLayers) {
+ const bool kApplyAlignmentToAllLayers = true;
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = kScaleFactors.size();
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i];
+ }
+
+ // Verify requested alignment from sink, alignment is not adjusted.
+ VideoEncoder::EncoderInfo info =
+ GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers);
+ int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ info, &config, absl::optional<size_t>(kMaxLayers));
+ EXPECT_EQ(alignment, kAdjustedAlignment);
+
+ // Verify adjusted scale factors.
+ for (int i = 0; i < num_streams; ++i) {
+ EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by,
+ kAdjustedScaleFactors[i]);
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/buffered_frame_decryptor.cc b/third_party/libwebrtc/video/buffered_frame_decryptor.cc
new file mode 100644
index 0000000000..24cbaf8265
--- /dev/null
+++ b/third_party/libwebrtc/video/buffered_frame_decryptor.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/buffered_frame_decryptor.h"
+
+#include <utility>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/video_coding/frame_object.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+BufferedFrameDecryptor::BufferedFrameDecryptor(
+ OnDecryptedFrameCallback* decrypted_frame_callback,
+ OnDecryptionStatusChangeCallback* decryption_status_change_callback,
+ const FieldTrialsView& field_trials)
+ : generic_descriptor_auth_experiment_(
+ !field_trials.IsDisabled("WebRTC-GenericDescriptorAuth")),
+ decrypted_frame_callback_(decrypted_frame_callback),
+ decryption_status_change_callback_(decryption_status_change_callback) {}
+
+BufferedFrameDecryptor::~BufferedFrameDecryptor() {}
+
+void BufferedFrameDecryptor::SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
+ frame_decryptor_ = std::move(frame_decryptor);
+}
+
+void BufferedFrameDecryptor::ManageEncryptedFrame(
+ std::unique_ptr<RtpFrameObject> encrypted_frame) {
+ switch (DecryptFrame(encrypted_frame.get())) {
+ case FrameDecision::kStash:
+ if (stashed_frames_.size() >= kMaxStashedFrames) {
+ RTC_LOG(LS_WARNING) << "Encrypted frame stash full poping oldest item.";
+ stashed_frames_.pop_front();
+ }
+ stashed_frames_.push_back(std::move(encrypted_frame));
+ break;
+ case FrameDecision::kDecrypted:
+ RetryStashedFrames();
+ decrypted_frame_callback_->OnDecryptedFrame(std::move(encrypted_frame));
+ break;
+ case FrameDecision::kDrop:
+ break;
+ }
+}
+
+BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
+ RtpFrameObject* frame) {
+ // Optionally attempt to decrypt the raw video frame if it was provided.
+ if (frame_decryptor_ == nullptr) {
+ RTC_LOG(LS_INFO) << "Frame decryption required but not attached to this "
+ "stream. Stashing frame.";
+ return FrameDecision::kStash;
+ }
+ // Retrieve the maximum possible size of the decrypted payload.
+ const size_t max_plaintext_byte_size =
+ frame_decryptor_->GetMaxPlaintextByteSize(cricket::MEDIA_TYPE_VIDEO,
+ frame->size());
+ RTC_CHECK_LE(max_plaintext_byte_size, frame->size());
+ // Place the decrypted frame inline into the existing frame.
+ rtc::ArrayView<uint8_t> inline_decrypted_bitstream(frame->mutable_data(),
+ max_plaintext_byte_size);
+
+ // Enable authenticating the header if the field trial isn't disabled.
+ std::vector<uint8_t> additional_data;
+ if (generic_descriptor_auth_experiment_) {
+ additional_data = RtpDescriptorAuthentication(frame->GetRtpVideoHeader());
+ }
+
+ // Attempt to decrypt the video frame.
+ const FrameDecryptorInterface::Result decrypt_result =
+ frame_decryptor_->Decrypt(cricket::MEDIA_TYPE_VIDEO, /*csrcs=*/{},
+ additional_data, *frame,
+ inline_decrypted_bitstream);
+ // Notify the callback if there was a change in decryption status.
+ if (decrypt_result.status != last_status_) {
+ last_status_ = decrypt_result.status;
+ decryption_status_change_callback_->OnDecryptionStatusChange(
+ decrypt_result.status);
+ }
+
+ if (!decrypt_result.IsOk()) {
+ // Only stash frames if we have never decrypted a frame before.
+ return first_frame_decrypted_ ? FrameDecision::kDrop
+ : FrameDecision::kStash;
+ }
+ RTC_CHECK_LE(decrypt_result.bytes_written, max_plaintext_byte_size);
+ // Update the frame to contain just the written bytes.
+ frame->set_size(decrypt_result.bytes_written);
+
+ // Indicate that all future frames that fail to decrypt should be dropped.
+ if (!first_frame_decrypted_) {
+ first_frame_decrypted_ = true;
+ }
+
+ return FrameDecision::kDecrypted;
+}
+
+void BufferedFrameDecryptor::RetryStashedFrames() {
+ if (!stashed_frames_.empty()) {
+ RTC_LOG(LS_INFO) << "Retrying stashed encrypted frames. Count: "
+ << stashed_frames_.size();
+ }
+ for (auto& frame : stashed_frames_) {
+ if (DecryptFrame(frame.get()) == FrameDecision::kDecrypted) {
+ decrypted_frame_callback_->OnDecryptedFrame(std::move(frame));
+ }
+ }
+ stashed_frames_.clear();
+}
+
+} // namespace webrtc
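As a standalone model of the policy implemented above (simplified types, not the WebRTC classes): frames that fail before any success are stashed up to a cap, later failures are dropped, and the first success replays the stash.

```cpp
#include <cstddef>
#include <deque>
#include <iostream>

enum class Decision { kStash, kDecrypted, kDrop };

// Toy stand-in for FrameDecryptorInterface: decryption succeeds once the key
// has "arrived".
struct FakeDecryptor {
  bool has_key = false;
  bool Decrypt(int /*frame*/) const { return has_key; }
};

class StashingPolicy {
 public:
  Decision Handle(const FakeDecryptor& d, int frame) {
    if (!d.Decrypt(frame)) {
      // After the first success, failures are dropped, not stashed.
      if (first_success_) return Decision::kDrop;
      if (stash_.size() >= kMax) stash_.pop_front();  // cap: drop oldest
      stash_.push_back(frame);
      return Decision::kStash;
    }
    first_success_ = true;
    // Replay the stash before delivering the current frame, as
    // RetryStashedFrames() does.
    for (int f : stash_) d.Decrypt(f);
    stash_.clear();
    return Decision::kDecrypted;
  }

 private:
  static constexpr size_t kMax = 24;  // kMaxStashedFrames
  bool first_success_ = false;
  std::deque<int> stash_;
};

int main() {
  FakeDecryptor d;
  StashingPolicy p;
  p.Handle(d, 1);  // no key yet -> kStash
  d.has_key = true;
  Decision r = p.Handle(d, 2);  // key arrived -> stash replayed, kDecrypted
  std::cout << (r == Decision::kDecrypted) << "\n";  // prints 1
}
```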
diff --git a/third_party/libwebrtc/video/buffered_frame_decryptor.h b/third_party/libwebrtc/video/buffered_frame_decryptor.h
new file mode 100644
index 0000000000..681f89a7f4
--- /dev/null
+++ b/third_party/libwebrtc/video/buffered_frame_decryptor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_BUFFERED_FRAME_DECRYPTOR_H_
+#define VIDEO_BUFFERED_FRAME_DECRYPTOR_H_
+
+#include <deque>
+#include <memory>
+
+#include "api/crypto/crypto_options.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/field_trials_view.h"
+#include "modules/video_coding/frame_object.h"
+
+namespace webrtc {
+
+// This callback is provided during the construction of the
+// BufferedFrameDecryptor and is called each time a frame is successfully
+// decrypted by the buffer.
+class OnDecryptedFrameCallback {
+ public:
+ virtual ~OnDecryptedFrameCallback() = default;
+ // Called each time a decrypted frame is returned.
+ virtual void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) = 0;
+};
+
+// This callback is called each time there is a status change in the decryption
+// stream. For example, going from no decryption state to a first successful
+// decryption, or going from a decryptable state to a non-decryptable state.
+class OnDecryptionStatusChangeCallback {
+ public:
+ virtual ~OnDecryptionStatusChangeCallback() = default;
+ // Called each time the decryption stream status changes. This call is
+ // blocking, so the callback implementation must return quickly. The status
+ // must match what is specified in the FrameDecryptorInterface file; notably,
+ // 0 must indicate success and any positive integer a failure.
+ virtual void OnDecryptionStatusChange(
+ FrameDecryptorInterface::Status status) = 0;
+};
+
+// The BufferedFrameDecryptor is responsible for deciding when to pass
+// decrypted received frames onto the OnDecryptedFrameCallback. Frames can be
+// delayed when frame encryption is enabled but the key hasn't arrived yet. In
+// this case we stash about 1 second of encrypted frames instead of dropping
+// them to prevent re-requesting the key frame. This optimization is
+// particularly important on low bandwidth networks. Note stashing is only ever
+// done if we have never sucessfully decrypted a frame before. After the first
+// successful decryption payloads will never be stashed.
+class BufferedFrameDecryptor final {
+ public:
+ // Constructs a new BufferedFrameDecryptor that can hold encrypted frames
+ // until they become decryptable.
+ explicit BufferedFrameDecryptor(
+ OnDecryptedFrameCallback* decrypted_frame_callback,
+ OnDecryptionStatusChangeCallback* decryption_status_change_callback,
+ const FieldTrialsView& field_trials);
+
+ ~BufferedFrameDecryptor();
+ // This object cannot be copied.
+ BufferedFrameDecryptor(const BufferedFrameDecryptor&) = delete;
+ BufferedFrameDecryptor& operator=(const BufferedFrameDecryptor&) = delete;
+
+ // Sets a new frame decryptor as the decryptor for the buffered frame
+ // decryptor. This allows the decryptor to be switched out without resetting
+ // the video stream.
+ void SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);
+
+ // Determines whether the frame should be stashed, dropped or handed off to
+ // the OnDecryptedFrameCallback.
+ void ManageEncryptedFrame(std::unique_ptr<RtpFrameObject> encrypted_frame);
+
+ private:
+ // Represents what should be done with a given frame.
+ enum class FrameDecision { kStash, kDecrypted, kDrop };
+
+ // Attempts to decrypt the frame. If it fails and no prior frames have been
+ // decrypted, it will return kStash; otherwise failed decryptions return
+ // kDrop. Successful decryptions will always return kDecrypted.
+ FrameDecision DecryptFrame(RtpFrameObject* frame);
+ // Retries all the stashed frames; this is triggered each time a kDecrypted
+ // event occurs.
+ void RetryStashedFrames();
+
+ static const size_t kMaxStashedFrames = 24;
+
+ const bool generic_descriptor_auth_experiment_;
+ bool first_frame_decrypted_ = false;
+ FrameDecryptorInterface::Status last_status_ =
+ FrameDecryptorInterface::Status::kUnknown;
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
+ OnDecryptedFrameCallback* const decrypted_frame_callback_;
+ OnDecryptionStatusChangeCallback* const decryption_status_change_callback_;
+ std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_BUFFERED_FRAME_DECRYPTOR_H_
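A wiring sketch of how a receiver might own the buffer and implement both callbacks; the `Receiver` class and its surroundings are hypothetical, while the callback signatures match this header:

```cpp
class Receiver : public OnDecryptedFrameCallback,
                 public OnDecryptionStatusChangeCallback {
 public:
  explicit Receiver(const FieldTrialsView& field_trials)
      : decryptor_(this, this, field_trials) {}

  void OnEncryptedFrame(std::unique_ptr<RtpFrameObject> frame) {
    // Stashes, drops, or decrypts-and-delivers depending on key availability.
    decryptor_.ManageEncryptedFrame(std::move(frame));
  }

 private:
  void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) override {
    // Hand the plaintext frame on to the decoding pipeline.
  }
  void OnDecryptionStatusChange(
      FrameDecryptorInterface::Status status) override {
    // Surface key arrival/loss to the application. The call is blocking, so
    // return quickly.
  }

  BufferedFrameDecryptor decryptor_;
};
```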
diff --git a/third_party/libwebrtc/video/buffered_frame_decryptor_unittest.cc b/third_party/libwebrtc/video/buffered_frame_decryptor_unittest.cc
new file mode 100644
index 0000000000..074777bf3a
--- /dev/null
+++ b/third_party/libwebrtc/video/buffered_frame_decryptor_unittest.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/buffered_frame_decryptor.h"
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/test/mock_frame_decryptor.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+using ::testing::Return;
+
+namespace webrtc {
+namespace {
+
+FrameDecryptorInterface::Result DecryptSuccess() {
+ return FrameDecryptorInterface::Result(FrameDecryptorInterface::Status::kOk,
+ 0);
+}
+
+FrameDecryptorInterface::Result DecryptFail() {
+ return FrameDecryptorInterface::Result(
+ FrameDecryptorInterface::Status::kFailedToDecrypt, 0);
+}
+
+} // namespace
+
+class BufferedFrameDecryptorTest : public ::testing::Test,
+ public OnDecryptedFrameCallback,
+ public OnDecryptionStatusChangeCallback {
+ public:
+ // Implements the OnDecryptedFrameCallback interface.
+ void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) override {
+ decrypted_frame_call_count_++;
+ }
+
+ void OnDecryptionStatusChange(
+ FrameDecryptorInterface::Status status) override {
+ ++decryption_status_change_count_;
+ }
+
+ // Returns a new fake RtpFrameObject; it abstracts away the difficult
+ // construction of the RtpFrameObject to simplify testing.
+ std::unique_ptr<RtpFrameObject> CreateRtpFrameObject(bool key_frame) {
+ seq_num_++;
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.generic.emplace();
+
+ // clang-format off
+ return std::make_unique<RtpFrameObject>(
+ seq_num_,
+ seq_num_,
+ /*markerBit=*/true,
+ /*times_nacked=*/0,
+ /*first_packet_received_time=*/0,
+ /*last_packet_received_time=*/0,
+ /*rtp_timestamp=*/0,
+ /*ntp_time_ms=*/0,
+ VideoSendTiming(),
+ /*payload_type=*/0,
+ kVideoCodecGeneric,
+ kVideoRotation_0,
+ VideoContentType::UNSPECIFIED,
+ rtp_video_header,
+ /*color_space=*/absl::nullopt,
+ RtpPacketInfos(),
+ EncodedImageBuffer::Create(/*size=*/0));
+ // clang-format on
+ }
+
+ protected:
+ BufferedFrameDecryptorTest() {
+ fake_packet_data_ = std::vector<uint8_t>(100);
+ decrypted_frame_call_count_ = 0;
+ decryption_status_change_count_ = 0;
+ seq_num_ = 0;
+ mock_frame_decryptor_ = rtc::make_ref_counted<MockFrameDecryptor>();
+ buffered_frame_decryptor_ =
+ std::make_unique<BufferedFrameDecryptor>(this, this, field_trials_);
+ buffered_frame_decryptor_->SetFrameDecryptor(mock_frame_decryptor_);
+ }
+
+ static const size_t kMaxStashedFrames;
+
+ test::ScopedKeyValueConfig field_trials_;
+ std::vector<uint8_t> fake_packet_data_;
+ rtc::scoped_refptr<MockFrameDecryptor> mock_frame_decryptor_;
+ std::unique_ptr<BufferedFrameDecryptor> buffered_frame_decryptor_;
+ size_t decrypted_frame_call_count_;
+ size_t decryption_status_change_count_ = 0;
+ uint16_t seq_num_;
+};
+
+const size_t BufferedFrameDecryptorTest::kMaxStashedFrames = 24;
+
+// Callback should always be triggered on a successful decryption.
+TEST_F(BufferedFrameDecryptorTest, CallbackCalledOnSuccessfulDecryption) {
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(1)
+ .WillOnce(Return(DecryptSuccess()));
+ EXPECT_CALL(*mock_frame_decryptor_, GetMaxPlaintextByteSize)
+ .Times(1)
+ .WillOnce(Return(0));
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(1));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(1));
+}
+
+// An initial fail to decrypt should not trigger the callback.
+TEST_F(BufferedFrameDecryptorTest, CallbackNotCalledOnFailedDecryption) {
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(1)
+ .WillOnce(Return(DecryptFail()));
+ EXPECT_CALL(*mock_frame_decryptor_, GetMaxPlaintextByteSize)
+ .Times(1)
+ .WillOnce(Return(0));
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(0));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(1));
+}
+
+// Initial failures should be stored and retried after the first successful
+// decryption.
+TEST_F(BufferedFrameDecryptorTest, DelayedCallbackOnBufferedFrames) {
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(3)
+ .WillOnce(Return(DecryptFail()))
+ .WillOnce(Return(DecryptSuccess()))
+ .WillOnce(Return(DecryptSuccess()));
+ EXPECT_CALL(*mock_frame_decryptor_, GetMaxPlaintextByteSize)
+ .Times(3)
+ .WillRepeatedly(Return(0));
+
+ // The first decrypt will fail stashing the first frame.
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(0));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(1));
+ // The second call will succeed playing back both frames.
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(false));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(2));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(2));
+}
+
+// Subsequent failures to decrypt after the first successful decryption should
+// be dropped rather than stashed.
+TEST_F(BufferedFrameDecryptorTest, FTDDiscardedAfterFirstSuccess) {
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(4)
+ .WillOnce(Return(DecryptFail()))
+ .WillOnce(Return(DecryptSuccess()))
+ .WillOnce(Return(DecryptSuccess()))
+ .WillOnce(Return(DecryptFail()));
+ EXPECT_CALL(*mock_frame_decryptor_, GetMaxPlaintextByteSize)
+ .Times(4)
+ .WillRepeatedly(Return(0));
+
+ // The first decrypt will fail stashing the first frame.
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(0));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(1));
+ // The second call will succeed playing back both frames.
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(false));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(2));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(2));
+ // A new failure call will not result in an additional decrypted frame
+ // callback.
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(2));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(3));
+}
+
+// Validate that the maximum number of stashed frames cannot be exceeded even
+// if more than the maximum number of frames arrive before the first successful
+// decryption.
+TEST_F(BufferedFrameDecryptorTest, MaximumNumberOfFramesStored) {
+ const size_t failed_to_decrypt_count = kMaxStashedFrames * 2;
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(failed_to_decrypt_count)
+ .WillRepeatedly(Return(DecryptFail()));
+ EXPECT_CALL(*mock_frame_decryptor_, GetMaxPlaintextByteSize)
+ .WillRepeatedly(Return(0));
+
+ for (size_t i = 0; i < failed_to_decrypt_count; ++i) {
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ }
+ EXPECT_EQ(decrypted_frame_call_count_, static_cast<size_t>(0));
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(1));
+
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(kMaxStashedFrames + 1)
+ .WillRepeatedly(Return(DecryptSuccess()));
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, kMaxStashedFrames + 1);
+ EXPECT_EQ(decryption_status_change_count_, static_cast<size_t>(2));
+}
+
+// Verifies that if a BufferedFrameDecryptor has no FrameDecryptor attached it
+// will still store frames, up to the stash maximum.
+TEST_F(BufferedFrameDecryptorTest, FramesStoredIfDecryptorNull) {
+ buffered_frame_decryptor_->SetFrameDecryptor(nullptr);
+ for (size_t i = 0; i < (2 * kMaxStashedFrames); ++i) {
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ }
+
+ EXPECT_CALL(*mock_frame_decryptor_, Decrypt)
+ .Times(kMaxStashedFrames + 1)
+ .WillRepeatedly(Return(DecryptSuccess()));
+ EXPECT_CALL(*mock_frame_decryptor_, GetMaxPlaintextByteSize)
+ .WillRepeatedly(Return(0));
+
+ // Attach the frame decryptor at a later point after frames have arrived.
+ buffered_frame_decryptor_->SetFrameDecryptor(mock_frame_decryptor_);
+
+ // The next frame should trigger kMaxStashedFrames + 1 decryptions.
+ buffered_frame_decryptor_->ManageEncryptedFrame(CreateRtpFrameObject(true));
+ EXPECT_EQ(decrypted_frame_call_count_, kMaxStashedFrames + 1);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/call_stats2.cc b/third_party/libwebrtc/video/call_stats2.cc
new file mode 100644
index 0000000000..ef575d2667
--- /dev/null
+++ b/third_party/libwebrtc/video/call_stats2.cc
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/call_stats2.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace internal {
+namespace {
+
+void RemoveOldReports(int64_t now, std::list<CallStats::RttTime>* reports) {
+ static constexpr const int64_t kRttTimeoutMs = 1500;
+ reports->remove_if(
+ [&now](CallStats::RttTime& r) { return now - r.time > kRttTimeoutMs; });
+}
+
+int64_t GetMaxRttMs(const std::list<CallStats::RttTime>& reports) {
+ int64_t max_rtt_ms = -1;
+ for (const CallStats::RttTime& rtt_time : reports)
+ max_rtt_ms = std::max(rtt_time.rtt, max_rtt_ms);
+ return max_rtt_ms;
+}
+
+int64_t GetAvgRttMs(const std::list<CallStats::RttTime>& reports) {
+ RTC_DCHECK(!reports.empty());
+ int64_t sum = 0;
+ for (std::list<CallStats::RttTime>::const_iterator it = reports.begin();
+ it != reports.end(); ++it) {
+ sum += it->rtt;
+ }
+ return sum / reports.size();
+}
+
+int64_t GetNewAvgRttMs(const std::list<CallStats::RttTime>& reports,
+ int64_t prev_avg_rtt) {
+ if (reports.empty())
+ return -1; // Reset (invalid average).
+
+ int64_t cur_rtt_ms = GetAvgRttMs(reports);
+ if (prev_avg_rtt == -1)
+ return cur_rtt_ms; // New initial average value.
+
+ // Weight factor to apply to the average rtt.
+ // We weigh the old average at 70% against the new average (30%).
+ constexpr const float kWeightFactor = 0.3f;
+ return prev_avg_rtt * (1.0f - kWeightFactor) + cur_rtt_ms * kWeightFactor;
+}
+
+} // namespace
+
+constexpr TimeDelta CallStats::kUpdateInterval;
+
+CallStats::CallStats(Clock* clock, TaskQueueBase* task_queue)
+ : clock_(clock),
+ max_rtt_ms_(-1),
+ avg_rtt_ms_(-1),
+ sum_avg_rtt_ms_(0),
+ num_avg_rtt_(0),
+ time_of_first_rtt_ms_(-1),
+ task_queue_(task_queue) {
+ RTC_DCHECK(task_queue_);
+ RTC_DCHECK_RUN_ON(task_queue_);
+}
+
+CallStats::~CallStats() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ RTC_DCHECK(observers_.empty());
+
+ repeating_task_.Stop();
+
+ UpdateHistograms();
+}
+
+void CallStats::EnsureStarted() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ repeating_task_ =
+ RepeatingTaskHandle::DelayedStart(task_queue_, kUpdateInterval, [this]() {
+ UpdateAndReport();
+ return kUpdateInterval;
+ });
+}
+
+void CallStats::UpdateAndReport() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+
+ RemoveOldReports(clock_->CurrentTime().ms(), &reports_);
+ max_rtt_ms_ = GetMaxRttMs(reports_);
+ avg_rtt_ms_ = GetNewAvgRttMs(reports_, avg_rtt_ms_);
+
+ // If there is a valid rtt, update all observers with the max rtt.
+ if (max_rtt_ms_ >= 0) {
+ RTC_DCHECK_GE(avg_rtt_ms_, 0);
+ for (CallStatsObserver* observer : observers_)
+ observer->OnRttUpdate(avg_rtt_ms_, max_rtt_ms_);
+ // Sum for Histogram of average RTT reported over the entire call.
+ sum_avg_rtt_ms_ += avg_rtt_ms_;
+ ++num_avg_rtt_;
+ }
+}
+
+void CallStats::RegisterStatsObserver(CallStatsObserver* observer) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ if (!absl::c_linear_search(observers_, observer))
+ observers_.push_back(observer);
+}
+
+void CallStats::DeregisterStatsObserver(CallStatsObserver* observer) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ observers_.remove(observer);
+}
+
+int64_t CallStats::LastProcessedRtt() const {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ // No need for locking since we're on the construction thread.
+ return avg_rtt_ms_;
+}
+
+void CallStats::OnRttUpdate(int64_t rtt) {
+ // For some RtpRtcp module instances (video send stream), this callback may
+ // be invoked from a separate task queue; in other cases we should already be
+ // on the correct TQ.
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ auto update = [this, rtt, now_ms]() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ reports_.push_back(RttTime(rtt, now_ms));
+ if (time_of_first_rtt_ms_ == -1)
+ time_of_first_rtt_ms_ = now_ms;
+ UpdateAndReport();
+ };
+
+ if (task_queue_->IsCurrent()) {
+ update();
+ } else {
+ task_queue_->PostTask(SafeTask(task_safety_.flag(), std::move(update)));
+ }
+}
+
+void CallStats::UpdateHistograms() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+
+ if (time_of_first_rtt_ms_ == -1 || num_avg_rtt_ < 1)
+ return;
+
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - time_of_first_rtt_ms_) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int64_t avg_rtt_ms = (sum_avg_rtt_ms_ + num_avg_rtt_ / 2) / num_avg_rtt_;
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.AverageRoundTripTimeInMilliseconds", avg_rtt_ms);
+ }
+}
+
+} // namespace internal
+} // namespace webrtc
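The running average above is an exponentially weighted mix (70% old, 30% new). A minimal numeric sketch of `GetNewAvgRttMs()`, reduced to a single new average value:

```cpp
#include <cstdint>
#include <iostream>

// Mirrors the weighting in GetNewAvgRttMs(); -1 marks an invalid/unset
// previous average.
int64_t NewAvgRttMs(int64_t prev_avg, int64_t cur_avg) {
  if (prev_avg == -1) return cur_avg;  // first valid report
  constexpr float kWeightFactor = 0.3f;
  return prev_avg * (1.0f - kWeightFactor) + cur_avg * kWeightFactor;
}

int main() {
  int64_t avg = -1;
  avg = NewAvgRttMs(avg, 100);  // -> 100
  avg = NewAvgRttMs(avg, 110);  // -> 0.7 * 100 + 0.3 * 110 = 103
  std::cout << avg << "\n";
}
```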
diff --git a/third_party/libwebrtc/video/call_stats2.h b/third_party/libwebrtc/video/call_stats2.h
new file mode 100644
index 0000000000..7e941d1e75
--- /dev/null
+++ b/third_party/libwebrtc/video/call_stats2.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_CALL_STATS2_H_
+#define VIDEO_CALL_STATS2_H_
+
+#include <list>
+#include <memory>
+
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/timestamp.h"
+#include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace internal {
+
+class CallStats {
+ public:
+ // Time interval for updating the observers.
+ static constexpr TimeDelta kUpdateInterval = TimeDelta::Millis(1000);
+
+ // Must be created and destroyed on the same task_queue.
+ CallStats(Clock* clock, TaskQueueBase* task_queue);
+ ~CallStats();
+
+ CallStats(const CallStats&) = delete;
+ CallStats& operator=(const CallStats&) = delete;
+
+ // Ensure that necessary repeating tasks are started.
+ void EnsureStarted();
+
+ // Exposes an RtcpRttStats implementation without CallStats itself inheriting
+ // from RtcpRttStats.
+ // That allows us to separate the threading model of how RtcpRttStats is
+ // used (mostly on a process thread) and how CallStats is used (mostly on
+ // the TQ/worker thread). Since for both cases, there is a LastProcessedRtt()
+ // method, this separation allows us to not need a lock for either.
+ RtcpRttStats* AsRtcpRttStats() { return &rtcp_rtt_stats_impl_; }
+
+ // Registers/deregisters a new observer to receive statistics updates.
+ // Must be called from the construction thread.
+ void RegisterStatsObserver(CallStatsObserver* observer);
+ void DeregisterStatsObserver(CallStatsObserver* observer);
+
+ // Expose `LastProcessedRtt()` from RtcpRttStats to the public interface, as
+ // it is the part of the API that is needed by direct users of CallStats.
+ int64_t LastProcessedRtt() const;
+
+ // Exposed for tests to test histogram support.
+ void UpdateHistogramsForTest() { UpdateHistograms(); }
+
+ // Helper struct keeping track of the time a rtt value is reported.
+ struct RttTime {
+ RttTime(int64_t new_rtt, int64_t rtt_time) : rtt(new_rtt), time(rtt_time) {}
+ const int64_t rtt;
+ const int64_t time;
+ };
+
+ private:
+ // Part of the RtcpRttStats implementation. Called by RtcpRttStatsImpl.
+ void OnRttUpdate(int64_t rtt);
+
+ void UpdateAndReport();
+
+ // This method must only be called when the process thread is not
+ // running, and from the construction thread.
+ void UpdateHistograms();
+
+ class RtcpRttStatsImpl : public RtcpRttStats {
+ public:
+ explicit RtcpRttStatsImpl(CallStats* owner) : owner_(owner) {}
+ ~RtcpRttStatsImpl() override = default;
+
+ private:
+ void OnRttUpdate(int64_t rtt) override {
+ // For video send streams (video/video_send_stream.cc), the RtpRtcp module
+ // is currently created on a transport worker TaskQueue and not the worker
+ // thread - which is what happens in other cases. We should probably fix
+ // that so that the call consistently comes in on the right thread.
+ owner_->OnRttUpdate(rtt);
+ }
+
+ int64_t LastProcessedRtt() const override {
+ // This call path shouldn't be used anymore. This impl is only for
+ // propagating the rtt from the RtpRtcp module, which does not call
+ // LastProcessedRtt(). Down the line we should consider removing
+ // LastProcessedRtt() and use the interface for event notifications only.
+ RTC_DCHECK_NOTREACHED() << "Legacy call path";
+ return 0;
+ }
+
+ CallStats* const owner_;
+ } rtcp_rtt_stats_impl_{this};
+
+ Clock* const clock_;
+
+ // Used to regularly call UpdateAndReport().
+ RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(task_queue_);
+
+ // The maximum RTT in the last statistics update (-1 if there is no valid
+ // estimate).
+ int64_t max_rtt_ms_ RTC_GUARDED_BY(task_queue_);
+
+ // Last reported average RTT value.
+ int64_t avg_rtt_ms_ RTC_GUARDED_BY(task_queue_);
+
+ int64_t sum_avg_rtt_ms_ RTC_GUARDED_BY(task_queue_);
+ int64_t num_avg_rtt_ RTC_GUARDED_BY(task_queue_);
+ int64_t time_of_first_rtt_ms_ RTC_GUARDED_BY(task_queue_);
+
+ // All Rtt reports within valid time interval, oldest first.
+ std::list<RttTime> reports_ RTC_GUARDED_BY(task_queue_);
+
+ // Observers getting stats reports.
+ std::list<CallStatsObserver*> observers_ RTC_GUARDED_BY(task_queue_);
+
+ TaskQueueBase* const task_queue_;
+
+ // Used to signal destruction to potentially pending tasks.
+ ScopedTaskSafety task_safety_;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_CALL_STATS2_H_
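A usage sketch on the owning task queue; `worker_queue` and `observer` are assumed to exist and to outlive these calls:

```cpp
CallStats call_stats(Clock::GetRealTimeClock(), worker_queue);
call_stats.EnsureStarted();
call_stats.RegisterStatsObserver(&observer);

// RTCP code reports RTTs through the exposed facade; OnRttUpdate() is safe to
// call from another task queue (it posts back to the owning one).
call_stats.AsRtcpRttStats()->OnRttUpdate(/*rtt=*/120);

// ... later, back on the owning task queue:
int64_t avg_rtt_ms = call_stats.LastProcessedRtt();
call_stats.DeregisterStatsObserver(&observer);
```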
diff --git a/third_party/libwebrtc/video/call_stats2_unittest.cc b/third_party/libwebrtc/video/call_stats2_unittest.cc
new file mode 100644
index 0000000000..76abbcfebd
--- /dev/null
+++ b/third_party/libwebrtc/video/call_stats2_unittest.cc
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/call_stats2.h"
+
+#include <memory>
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/thread.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/run_loop.h"
+
+using ::testing::AnyNumber;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+
+namespace webrtc {
+namespace internal {
+
+class MockStatsObserver : public CallStatsObserver {
+ public:
+ MockStatsObserver() {}
+ virtual ~MockStatsObserver() {}
+
+ MOCK_METHOD(void, OnRttUpdate, (int64_t, int64_t), (override));
+};
+
+class CallStats2Test : public ::testing::Test {
+ public:
+ CallStats2Test() { call_stats_.EnsureStarted(); }
+
+ // Queues an rtt update call on the process thread.
+ void AsyncSimulateRttUpdate(int64_t rtt) {
+ RtcpRttStats* rtcp_rtt_stats = call_stats_.AsRtcpRttStats();
+ task_queue_->PostTask(
+ [rtcp_rtt_stats, rtt] { rtcp_rtt_stats->OnRttUpdate(rtt); });
+ }
+
+ protected:
+ void FlushProcessAndWorker() {
+ task_queue_->PostTask([this] { loop_.PostTask([this] { loop_.Quit(); }); });
+ loop_.Run();
+ }
+
+ test::RunLoop loop_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_ =
+ CreateDefaultTaskQueueFactory()->CreateTaskQueue(
+ "CallStats",
+ TaskQueueFactory::Priority::NORMAL);
+
+ // Note: Since rtc::Thread doesn't support injecting a Clock, we're going
+ // to be using a mix of the fake clock (used by CallStats) as well as the
+ // system clock (used by rtc::Thread). This isn't ideal and will result in
+ // the tests taking longer to execute in some cases than they need to.
+ SimulatedClock fake_clock_{12345};
+ CallStats call_stats_{&fake_clock_, loop_.task_queue()};
+};
+
+TEST_F(CallStats2Test, AddAndTriggerCallback) {
+ static constexpr const int64_t kRtt = 25;
+
+ MockStatsObserver stats_observer;
+ EXPECT_CALL(stats_observer, OnRttUpdate(kRtt, kRtt))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] { loop_.Quit(); }));
+
+ call_stats_.RegisterStatsObserver(&stats_observer);
+ EXPECT_EQ(-1, call_stats_.LastProcessedRtt());
+
+ AsyncSimulateRttUpdate(kRtt);
+ loop_.Run();
+
+ EXPECT_EQ(kRtt, call_stats_.LastProcessedRtt());
+
+ call_stats_.DeregisterStatsObserver(&stats_observer);
+}
+
+TEST_F(CallStats2Test, ProcessTime) {
+ static constexpr const int64_t kRtt = 100;
+ static constexpr const int64_t kRtt2 = 80;
+
+ MockStatsObserver stats_observer;
+
+ EXPECT_CALL(stats_observer, OnRttUpdate(kRtt, kRtt))
+ .Times(2)
+ .WillOnce(InvokeWithoutArgs([this] {
+ // Advance clock and verify we get an update.
+ fake_clock_.AdvanceTimeMilliseconds(CallStats::kUpdateInterval.ms());
+ }))
+ .WillRepeatedly(InvokeWithoutArgs([this] {
+ AsyncSimulateRttUpdate(kRtt2);
+ // Advance clock just too little to get an update.
+ fake_clock_.AdvanceTimeMilliseconds(CallStats::kUpdateInterval.ms() -
+ 1);
+ }));
+
+ // In case you're reading this and wondering how this number is arrived at,
+ // please see comments in the ChangeRtt test that go into some detail.
+ static constexpr const int64_t kLastAvg = 94;
+ EXPECT_CALL(stats_observer, OnRttUpdate(kLastAvg, kRtt2))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] { loop_.Quit(); }));
+
+ call_stats_.RegisterStatsObserver(&stats_observer);
+
+ AsyncSimulateRttUpdate(kRtt);
+ loop_.Run();
+
+ call_stats_.DeregisterStatsObserver(&stats_observer);
+}
+
+// Verify all observers get correct estimates and observers can be added and
+// removed.
+TEST_F(CallStats2Test, MultipleObservers) {
+ MockStatsObserver stats_observer_1;
+ call_stats_.RegisterStatsObserver(&stats_observer_1);
+ // Add the second observer twice; there should still be only one report to
+ // the observer.
+ MockStatsObserver stats_observer_2;
+ call_stats_.RegisterStatsObserver(&stats_observer_2);
+ call_stats_.RegisterStatsObserver(&stats_observer_2);
+
+ static constexpr const int64_t kRtt = 100;
+
+ // Verify both observers are updated.
+ EXPECT_CALL(stats_observer_1, OnRttUpdate(kRtt, kRtt))
+ .Times(AnyNumber())
+ .WillRepeatedly(Return());
+ EXPECT_CALL(stats_observer_2, OnRttUpdate(kRtt, kRtt))
+ .Times(AnyNumber())
+ .WillOnce(InvokeWithoutArgs([this] { loop_.Quit(); }))
+ .WillRepeatedly(Return());
+ AsyncSimulateRttUpdate(kRtt);
+ loop_.Run();
+
+ // Deregister the second observer and verify update is only sent to the first
+ // observer.
+ call_stats_.DeregisterStatsObserver(&stats_observer_2);
+
+ EXPECT_CALL(stats_observer_1, OnRttUpdate(kRtt, kRtt))
+ .Times(AnyNumber())
+ .WillOnce(InvokeWithoutArgs([this] { loop_.Quit(); }))
+ .WillRepeatedly(Return());
+ EXPECT_CALL(stats_observer_2, OnRttUpdate(kRtt, kRtt)).Times(0);
+ AsyncSimulateRttUpdate(kRtt);
+ loop_.Run();
+
+ // Deregister the first observer.
+ call_stats_.DeregisterStatsObserver(&stats_observer_1);
+
+ // Now make sure we don't get any callbacks.
+ EXPECT_CALL(stats_observer_1, OnRttUpdate(kRtt, kRtt)).Times(0);
+ EXPECT_CALL(stats_observer_2, OnRttUpdate(kRtt, kRtt)).Times(0);
+ AsyncSimulateRttUpdate(kRtt);
+
+ // Flush the task queue and the worker to make sure we return after the
+ // posted update has been handled.
+ FlushProcessAndWorker();
+}
+
+// Verify increasing and decreasing rtt triggers callbacks with correct values.
+TEST_F(CallStats2Test, ChangeRtt) {
+ // NOTE: This test assumes things about how old reports are removed inside
+ // call_stats.cc. The timeout value is 1500ms, but it's not obvious here how
+ // advancing the clock affects that algorithm and, subsequently, the average
+ // reported rtt.
+
+ MockStatsObserver stats_observer;
+ call_stats_.RegisterStatsObserver(&stats_observer);
+
+ static constexpr const int64_t kFirstRtt = 100;
+ static constexpr const int64_t kLowRtt = kFirstRtt - 20;
+ static constexpr const int64_t kHighRtt = kFirstRtt + 20;
+
+ EXPECT_CALL(stats_observer, OnRttUpdate(kFirstRtt, kFirstRtt))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] {
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ AsyncSimulateRttUpdate(kHighRtt); // Reported at T1 (1000ms).
+ }));
+
+ // NOTE: This relies on the internal algorithms of call_stats.cc.
+ // There's a weight factor there (0.3), that weighs the previous average to
+ // the new one by 70%, so the number 103 in this case is arrived at like so:
+ // (100) / 1 * 0.7 + (100+120)/2 * 0.3 = 103
+ static constexpr const int64_t kAvgRtt1 = 103;
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt1, kHighRtt))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] {
+ // This interacts with an internal implementation detail in call_stats
+ // that decays the oldest rtt value. See more below.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ AsyncSimulateRttUpdate(kLowRtt); // Reported at T2 (2000ms).
+ }));
+
+ // Increase time enough for a new update, but not too much to make the
+ // rtt invalid. Report a lower rtt and verify the old/high value still is sent
+ // in the callback.
+
+ // Here, enough time must have passed in order to remove exactly the first
+ // report and nothing else (>1500ms has passed since the first rtt).
+ // So, this value is arrived at by doing:
+ // (kAvgRtt1)/1 * 0.7 + (kHighRtt+kLowRtt)/2 * 0.3 = 102.1
+ static constexpr const int64_t kAvgRtt2 = 102;
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt2, kHighRtt))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] {
+ // Advance time to make the high report invalid, the lower rtt should
+ // now be in the callback.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ }));
+
+ static constexpr const int64_t kAvgRtt3 = 95;
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt3, kLowRtt))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] { loop_.Quit(); }));
+
+ // Trigger the first rtt value and set off the chain of callbacks.
+ AsyncSimulateRttUpdate(kFirstRtt); // Reported at T0 (0ms).
+ loop_.Run();
+
+ call_stats_.DeregisterStatsObserver(&stats_observer);
+}
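+// Illustrative sketch (not part of the upstream file): the smoothing that the
+// expectations above assume, where call_stats.cc keeps 70% of the previous
+// average and blends in 30% of the mean of the reports still in the window:
+//
+//   int64_t SmoothedRtt(int64_t prev_avg, int64_t window_mean) {
+//     return static_cast<int64_t>(prev_avg * 0.7 + window_mean * 0.3);
+//   }
+//
+// SmoothedRtt(100, (100 + 120) / 2) evaluates to 103, matching kAvgRtt1.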
+
+TEST_F(CallStats2Test, LastProcessedRtt) {
+ MockStatsObserver stats_observer;
+ call_stats_.RegisterStatsObserver(&stats_observer);
+
+ static constexpr const int64_t kRttLow = 10;
+ static constexpr const int64_t kRttHigh = 30;
+  // The following two average numbers depend on the average + weight
+  // calculations in call_stats.cc.
+ static constexpr const int64_t kAvgRtt1 = 13;
+ static constexpr const int64_t kAvgRtt2 = 15;
+
+ EXPECT_CALL(stats_observer, OnRttUpdate(kRttLow, kRttLow))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs([this] {
+ EXPECT_EQ(kRttLow, call_stats_.LastProcessedRtt());
+        // Don't advance the clock, to make sure that the low and high rtt
+        // values are associated with the same timestamp.
+ AsyncSimulateRttUpdate(kRttHigh);
+ }));
+
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt1, kRttHigh))
+ .Times(AnyNumber())
+ .WillOnce(InvokeWithoutArgs([this] {
+ EXPECT_EQ(kAvgRtt1, call_stats_.LastProcessedRtt());
+ fake_clock_.AdvanceTimeMilliseconds(CallStats::kUpdateInterval.ms());
+ AsyncSimulateRttUpdate(kRttLow);
+ AsyncSimulateRttUpdate(kRttHigh);
+ }))
+ .WillRepeatedly(Return());
+
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt2, kRttHigh))
+ .Times(AnyNumber())
+ .WillOnce(InvokeWithoutArgs([this] {
+ EXPECT_EQ(kAvgRtt2, call_stats_.LastProcessedRtt());
+ loop_.Quit();
+ }))
+ .WillRepeatedly(Return());
+
+  // Set a first value and verify that LastProcessedRtt initially returns the
+  // average rtt.
+ fake_clock_.AdvanceTimeMilliseconds(CallStats::kUpdateInterval.ms());
+ AsyncSimulateRttUpdate(kRttLow);
+ loop_.Run();
+ EXPECT_EQ(kAvgRtt2, call_stats_.LastProcessedRtt());
+
+ call_stats_.DeregisterStatsObserver(&stats_observer);
+}
+
+TEST_F(CallStats2Test, ProducesHistogramMetrics) {
+ metrics::Reset();
+ static constexpr const int64_t kRtt = 123;
+ MockStatsObserver stats_observer;
+ call_stats_.RegisterStatsObserver(&stats_observer);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kRtt, kRtt))
+ .Times(AnyNumber())
+ .WillRepeatedly(InvokeWithoutArgs([this] { loop_.Quit(); }));
+
+ AsyncSimulateRttUpdate(kRtt);
+ loop_.Run();
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds *
+ CallStats::kUpdateInterval.ms());
+ AsyncSimulateRttUpdate(kRtt);
+ loop_.Run();
+
+ call_stats_.DeregisterStatsObserver(&stats_observer);
+
+ call_stats_.UpdateHistogramsForTest();
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.AverageRoundTripTimeInMilliseconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AverageRoundTripTimeInMilliseconds",
+ kRtt));
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/config/BUILD.gn b/third_party/libwebrtc/video/config/BUILD.gn
new file mode 100644
index 0000000000..96e254e76b
--- /dev/null
+++ b/third_party/libwebrtc/video/config/BUILD.gn
@@ -0,0 +1,99 @@
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("streams_config") {
+ sources = [
+ "encoder_stream_factory.cc",
+ "encoder_stream_factory.h",
+ "simulcast.cc",
+ "simulcast.h",
+ ]
+
+ deps = [
+ ":encoder_config",
+ "../../api:field_trials_view",
+ "../../api/transport:field_trial_based_config",
+ "../../api/units:data_rate",
+ "../../api/video:video_codec_constants",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call/adaptation:resource_adaptation",
+ "../../media:media_constants",
+ "../../media:rtc_media_base",
+ "../../modules/video_coding:video_coding_utility",
+ "../../modules/video_coding:webrtc_vp9_helpers",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:min_video_bitrate_experiment",
+ "../../rtc_base/experiments:normalize_simulcast_size_experiment",
+ "../../rtc_base/experiments:rate_control_settings",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("encoder_config") {
+ sources = [
+ "video_encoder_config.cc",
+ "video_encoder_config.h",
+ ]
+
+ deps = [
+ "../../api:scoped_refptr",
+ "../../api/video:resolution",
+ "../../api/video_codecs:scalability_mode",
+ "../../api/video_codecs:video_codecs_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:refcount",
+ "../../rtc_base:stringutils",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_config_tests") {
+ testonly = true
+
+ defines = []
+ sources = [
+ "encoder_stream_factory_unittest.cc",
+ "simulcast_unittest.cc",
+ ]
+ deps = [
+ ":streams_config",
+ "../../api/transport:field_trial_based_config",
+ "../../call/adaptation:resource_adaptation",
+ "../../media:media_constants",
+ "../../test:field_trial",
+ "../../test:test_support",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/functional:bind_front",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+ if (!build_with_mozilla) {
+ deps += [ "../../media:rtc_media_base" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/video/config/encoder_config_gn/moz.build b/third_party/libwebrtc/video/config/encoder_config_gn/moz.build
new file mode 100644
index 0000000000..ea2b54bfb8
--- /dev/null
+++ b/third_party/libwebrtc/video/config/encoder_config_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/config/video_encoder_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("encoder_config_gn")
diff --git a/third_party/libwebrtc/video/config/encoder_stream_factory.cc b/third_party/libwebrtc/video/config/encoder_stream_factory.cc
new file mode 100644
index 0000000000..fceadf09b4
--- /dev/null
+++ b/third_party/libwebrtc/video/config/encoder_stream_factory.cc
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/config/encoder_stream_factory.h"
+
+#include <algorithm>
+#include <limits>
+#include <set>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "api/video/video_codec_constants.h"
+#include "media/base/media_constants.h"
+#include "media/base/video_adapter.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/experiments/normalize_simulcast_size_experiment.h"
+#include "rtc_base/logging.h"
+#include "video/config/simulcast.h"
+
+namespace cricket {
+namespace {
+
+const int kMinLayerSize = 16;
+
+int ScaleDownResolution(int resolution,
+ double scale_down_by,
+ int min_resolution) {
+  // Resolution is never scaled down to smaller than min_resolution.
+ // If the input resolution is already smaller than min_resolution,
+ // no scaling should be done at all.
+ if (resolution <= min_resolution)
+ return resolution;
+ return std::max(static_cast<int>(resolution / scale_down_by + 0.5),
+ min_resolution);
+}
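+
+// For illustration (not part of the upstream file): with scale_down_by = 4.0
+// and min_resolution = 16, ScaleDownResolution(1280, 4.0, 16) rounds
+// 1280 / 4 + 0.5 to 320, while ScaleDownResolution(12, 4.0, 16) returns 12
+// unchanged because the input is already at or below the minimum.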
+
+bool PowerOfTwo(int value) {
+ return (value > 0) && ((value & (value - 1)) == 0);
+}
+
+bool IsScaleFactorsPowerOfTwo(const webrtc::VideoEncoderConfig& config) {
+ for (const auto& layer : config.simulcast_layers) {
+ double scale = std::max(layer.scale_resolution_down_by, 1.0);
+ if (std::round(scale) != scale || !PowerOfTwo(scale)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool IsTemporalLayersSupported(const std::string& codec_name) {
+ return absl::EqualsIgnoreCase(codec_name, kVp8CodecName) ||
+ absl::EqualsIgnoreCase(codec_name, kVp9CodecName) ||
+ absl::EqualsIgnoreCase(codec_name, kAv1CodecName);
+}
+
+size_t FindRequiredActiveLayers(
+ const webrtc::VideoEncoderConfig& encoder_config) {
+ // Need enough layers so that at least the first active one is present.
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ if (encoder_config.simulcast_layers[i].active) {
+ return i + 1;
+ }
+ }
+ return 0;
+}
+
+// The selected thresholds for QVGA and VGA corresponded to a QP around 10.
+// Above the selected bitrates, additional bits yielded only a small QP
+// improvement.
+static int GetMaxDefaultVideoBitrateKbps(int width,
+ int height,
+ bool is_screenshare) {
+ int max_bitrate;
+ if (width * height <= 320 * 240) {
+ max_bitrate = 600;
+ } else if (width * height <= 640 * 480) {
+ max_bitrate = 1700;
+ } else if (width * height <= 960 * 540) {
+ max_bitrate = 2000;
+ } else {
+ max_bitrate = 2500;
+ }
+ if (is_screenshare)
+ max_bitrate = std::max(max_bitrate, 1200);
+ return max_bitrate;
+}
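+
+// Illustrative (not upstream): 640x360 falls in the <= 640*480 bucket, so a
+// camera stream defaults to 1700 kbps; for a screenshare the value is
+// unchanged, since std::max(1700, 1200) == 1700.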
+
+} // namespace
+
+// TODO(bugs.webrtc.org/8785): Consider removing max_qp as member of
+// EncoderStreamFactory and instead set this value individually for each stream
+// in the VideoEncoderConfig.simulcast_layers.
+EncoderStreamFactory::EncoderStreamFactory(std::string codec_name,
+ int max_qp,
+ bool is_screenshare,
+ bool conference_mode)
+ : codec_name_(codec_name),
+ max_qp_(max_qp),
+ is_screenshare_(is_screenshare),
+ conference_mode_(conference_mode),
+ trials_(fallback_trials_),
+ encoder_info_requested_resolution_alignment_(1) {}
+
+EncoderStreamFactory::EncoderStreamFactory(
+ std::string codec_name,
+ int max_qp,
+ bool is_screenshare,
+ bool conference_mode,
+ const webrtc::VideoEncoder::EncoderInfo& encoder_info,
+ absl::optional<webrtc::VideoSourceRestrictions> restrictions,
+ const webrtc::FieldTrialsView* trials)
+ : codec_name_(codec_name),
+ max_qp_(max_qp),
+ is_screenshare_(is_screenshare),
+ conference_mode_(conference_mode),
+ trials_(trials ? *trials : fallback_trials_),
+ encoder_info_requested_resolution_alignment_(
+ encoder_info.requested_resolution_alignment),
+ restrictions_(restrictions) {}
+
+std::vector<webrtc::VideoStream> EncoderStreamFactory::CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const webrtc::VideoEncoderConfig& encoder_config) {
+ RTC_DCHECK_GT(encoder_config.number_of_streams, 0);
+ RTC_DCHECK_GE(encoder_config.simulcast_layers.size(),
+ encoder_config.number_of_streams);
+
+ const absl::optional<webrtc::DataRate> experimental_min_bitrate =
+ GetExperimentalMinVideoBitrate(encoder_config.codec_type);
+
+ if (encoder_config.number_of_streams > 1 ||
+ ((absl::EqualsIgnoreCase(codec_name_, kVp8CodecName) ||
+ absl::EqualsIgnoreCase(codec_name_, kH264CodecName)) &&
+ is_screenshare_ && conference_mode_)) {
+ return CreateSimulcastOrConferenceModeScreenshareStreams(
+ frame_width, frame_height, encoder_config, experimental_min_bitrate);
+ }
+
+ return CreateDefaultVideoStreams(frame_width, frame_height, encoder_config,
+ experimental_min_bitrate);
+}
+
+std::vector<webrtc::VideoStream>
+EncoderStreamFactory::CreateDefaultVideoStreams(
+ int width,
+ int height,
+ const webrtc::VideoEncoderConfig& encoder_config,
+ const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const {
+ std::vector<webrtc::VideoStream> layers;
+
+  // If the max bitrate is unset, use the default bitrate for non-simulcast.
+ int max_bitrate_bps =
+ (encoder_config.max_bitrate_bps > 0)
+ ? encoder_config.max_bitrate_bps
+ : GetMaxDefaultVideoBitrateKbps(width, height, is_screenshare_) *
+ 1000;
+
+ int min_bitrate_bps =
+ experimental_min_bitrate
+ ? rtc::saturated_cast<int>(experimental_min_bitrate->bps())
+ : webrtc::kDefaultMinVideoBitrateBps;
+ if (encoder_config.simulcast_layers[0].min_bitrate_bps > 0) {
+ // Use set min bitrate.
+ min_bitrate_bps = encoder_config.simulcast_layers[0].min_bitrate_bps;
+ // If only min bitrate is configured, make sure max is above min.
+ if (encoder_config.max_bitrate_bps <= 0)
+ max_bitrate_bps = std::max(min_bitrate_bps, max_bitrate_bps);
+ }
+ int max_framerate = (encoder_config.simulcast_layers[0].max_framerate > 0)
+ ? encoder_config.simulcast_layers[0].max_framerate
+ : kDefaultVideoMaxFramerate;
+
+ webrtc::VideoStream layer;
+ layer.width = width;
+ layer.height = height;
+ layer.max_framerate = max_framerate;
+ layer.requested_resolution =
+ encoder_config.simulcast_layers[0].requested_resolution;
+  // Note: VP9 seems to be sending if any layer is active (see
+  // `UpdateSendState`), and still uses parameters only from
+  // encoder_config.simulcast_layers[0].
+ layer.active = absl::c_any_of(encoder_config.simulcast_layers,
+ [](const auto& layer) { return layer.active; });
+
+ if (encoder_config.simulcast_layers[0].requested_resolution) {
+ auto res = GetLayerResolutionFromRequestedResolution(
+ width, height,
+ *encoder_config.simulcast_layers[0].requested_resolution);
+ layer.width = res.width;
+ layer.height = res.height;
+ } else if (encoder_config.simulcast_layers[0].scale_resolution_down_by > 1.) {
+ layer.width = ScaleDownResolution(
+ layer.width,
+ encoder_config.simulcast_layers[0].scale_resolution_down_by,
+ kMinLayerSize);
+ layer.height = ScaleDownResolution(
+ layer.height,
+ encoder_config.simulcast_layers[0].scale_resolution_down_by,
+ kMinLayerSize);
+ }
+
+ if (absl::EqualsIgnoreCase(codec_name_, kVp9CodecName)) {
+ RTC_DCHECK(encoder_config.encoder_specific_settings);
+    // Use VP9 SVC layering from codec settings, which might be initialized
+    // through a field trial in ConfigureVideoEncoderSettings.
+ webrtc::VideoCodecVP9 vp9_settings;
+ encoder_config.encoder_specific_settings->FillVideoCodecVp9(&vp9_settings);
+ layer.num_temporal_layers = vp9_settings.numberOfTemporalLayers;
+
+    // The number of spatial layers is signalled differently by different
+    // call sites (sigh); pick the max, as we are interested in the upper
+    // bound.
+ int num_spatial_layers =
+ std::max({encoder_config.simulcast_layers.size(),
+ encoder_config.spatial_layers.size(),
+ size_t{vp9_settings.numberOfSpatialLayers}});
+
+ if (width * height > 0 &&
+ (layer.num_temporal_layers > 1u || num_spatial_layers > 1)) {
+ // In SVC mode, the VP9 max bitrate is determined by SvcConfig, instead of
+ // GetMaxDefaultVideoBitrateKbps().
+ std::vector<webrtc::SpatialLayer> svc_layers =
+ webrtc::GetSvcConfig(width, height, max_framerate,
+ /*first_active_layer=*/0, num_spatial_layers,
+ *layer.num_temporal_layers, is_screenshare_);
+ int sum_max_bitrates_kbps = 0;
+ for (const webrtc::SpatialLayer& spatial_layer : svc_layers) {
+ sum_max_bitrates_kbps += spatial_layer.maxBitrate;
+ }
+ RTC_DCHECK_GE(sum_max_bitrates_kbps, 0);
+ if (encoder_config.max_bitrate_bps <= 0) {
+ max_bitrate_bps = sum_max_bitrates_kbps * 1000;
+ } else {
+ max_bitrate_bps =
+ std::min(max_bitrate_bps, sum_max_bitrates_kbps * 1000);
+ }
+ max_bitrate_bps = std::max(min_bitrate_bps, max_bitrate_bps);
+ }
+ }
+
+  // If the application sets a max bitrate that is lower than the min
+  // bitrate, we adjust the min down (see bugs.webrtc.org/9141).
+ layer.min_bitrate_bps = std::min(min_bitrate_bps, max_bitrate_bps);
+ if (encoder_config.simulcast_layers[0].target_bitrate_bps <= 0) {
+ layer.target_bitrate_bps = max_bitrate_bps;
+ } else {
+ layer.target_bitrate_bps = std::min(
+ encoder_config.simulcast_layers[0].target_bitrate_bps, max_bitrate_bps);
+ }
+ layer.max_bitrate_bps = max_bitrate_bps;
+ layer.max_qp = max_qp_;
+ layer.bitrate_priority = encoder_config.bitrate_priority;
+
+ if (IsTemporalLayersSupported(codec_name_)) {
+ // Use configured number of temporal layers if set.
+ if (encoder_config.simulcast_layers[0].num_temporal_layers) {
+ layer.num_temporal_layers =
+ *encoder_config.simulcast_layers[0].num_temporal_layers;
+ }
+ }
+ layer.scalability_mode = encoder_config.simulcast_layers[0].scalability_mode;
+ layers.push_back(layer);
+ return layers;
+}
+
+std::vector<webrtc::VideoStream>
+EncoderStreamFactory::CreateSimulcastOrConferenceModeScreenshareStreams(
+ int width,
+ int height,
+ const webrtc::VideoEncoderConfig& encoder_config,
+ const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const {
+ std::vector<webrtc::VideoStream> layers;
+
+ const bool temporal_layers_supported =
+ absl::EqualsIgnoreCase(codec_name_, kVp8CodecName) ||
+ absl::EqualsIgnoreCase(codec_name_, kH264CodecName);
+  // Use legacy simulcast screenshare if conference mode is explicitly
+  // enabled; otherwise use the regular, generic simulcast configuration
+  // path.
+ layers = GetSimulcastConfig(FindRequiredActiveLayers(encoder_config),
+ encoder_config.number_of_streams, width, height,
+ encoder_config.bitrate_priority, max_qp_,
+ is_screenshare_ && conference_mode_,
+ temporal_layers_supported, trials_);
+ // Allow an experiment to override the minimum bitrate for the lowest
+ // spatial layer. The experiment's configuration has the lowest priority.
+ if (experimental_min_bitrate) {
+ layers[0].min_bitrate_bps =
+ rtc::saturated_cast<int>(experimental_min_bitrate->bps());
+ }
+ // Update the active simulcast layers and configured bitrates.
+ bool is_highest_layer_max_bitrate_configured = false;
+ const bool has_scale_resolution_down_by = absl::c_any_of(
+ encoder_config.simulcast_layers, [](const webrtc::VideoStream& layer) {
+ return layer.scale_resolution_down_by != -1.;
+ });
+
+ bool default_scale_factors_used = true;
+ if (has_scale_resolution_down_by) {
+ default_scale_factors_used = IsScaleFactorsPowerOfTwo(encoder_config);
+ }
+ const bool norm_size_configured =
+ webrtc::NormalizeSimulcastSizeExperiment::GetBase2Exponent().has_value();
+ const int normalized_width =
+ (default_scale_factors_used || norm_size_configured) &&
+ (width >= kMinLayerSize)
+ ? NormalizeSimulcastSize(width, encoder_config.number_of_streams)
+ : width;
+ const int normalized_height =
+ (default_scale_factors_used || norm_size_configured) &&
+ (height >= kMinLayerSize)
+ ? NormalizeSimulcastSize(height, encoder_config.number_of_streams)
+ : height;
+ for (size_t i = 0; i < layers.size(); ++i) {
+ layers[i].active = encoder_config.simulcast_layers[i].active;
+ layers[i].scalability_mode =
+ encoder_config.simulcast_layers[i].scalability_mode;
+ layers[i].requested_resolution =
+ encoder_config.simulcast_layers[i].requested_resolution;
+ // Update with configured num temporal layers if supported by codec.
+ if (encoder_config.simulcast_layers[i].num_temporal_layers &&
+ IsTemporalLayersSupported(codec_name_)) {
+ layers[i].num_temporal_layers =
+ *encoder_config.simulcast_layers[i].num_temporal_layers;
+ }
+ if (encoder_config.simulcast_layers[i].max_framerate > 0) {
+ layers[i].max_framerate =
+ encoder_config.simulcast_layers[i].max_framerate;
+ }
+ if (encoder_config.simulcast_layers[i].requested_resolution.has_value()) {
+ auto res = GetLayerResolutionFromRequestedResolution(
+ normalized_width, normalized_height,
+ *encoder_config.simulcast_layers[i].requested_resolution);
+ layers[i].width = res.width;
+ layers[i].height = res.height;
+ } else if (has_scale_resolution_down_by) {
+ const double scale_resolution_down_by = std::max(
+ encoder_config.simulcast_layers[i].scale_resolution_down_by, 1.0);
+ layers[i].width = ScaleDownResolution(
+ normalized_width, scale_resolution_down_by, kMinLayerSize);
+ layers[i].height = ScaleDownResolution(
+ normalized_height, scale_resolution_down_by, kMinLayerSize);
+ }
+ // Update simulcast bitrates with configured min and max bitrate.
+ if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0) {
+ layers[i].min_bitrate_bps =
+ encoder_config.simulcast_layers[i].min_bitrate_bps;
+ }
+ if (encoder_config.simulcast_layers[i].max_bitrate_bps > 0) {
+ layers[i].max_bitrate_bps =
+ encoder_config.simulcast_layers[i].max_bitrate_bps;
+ }
+ if (encoder_config.simulcast_layers[i].target_bitrate_bps > 0) {
+ layers[i].target_bitrate_bps =
+ encoder_config.simulcast_layers[i].target_bitrate_bps;
+ }
+ if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0 &&
+ encoder_config.simulcast_layers[i].max_bitrate_bps > 0) {
+ // Min and max bitrate are configured.
+ // Set target to 3/4 of the max bitrate (or to max if below min).
+ if (encoder_config.simulcast_layers[i].target_bitrate_bps <= 0)
+ layers[i].target_bitrate_bps = layers[i].max_bitrate_bps * 3 / 4;
+ if (layers[i].target_bitrate_bps < layers[i].min_bitrate_bps)
+ layers[i].target_bitrate_bps = layers[i].max_bitrate_bps;
+ } else if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0) {
+ // Only min bitrate is configured, make sure target/max are above min.
+ layers[i].target_bitrate_bps =
+ std::max(layers[i].target_bitrate_bps, layers[i].min_bitrate_bps);
+ layers[i].max_bitrate_bps =
+ std::max(layers[i].max_bitrate_bps, layers[i].min_bitrate_bps);
+ } else if (encoder_config.simulcast_layers[i].max_bitrate_bps > 0) {
+ // Only max bitrate is configured, make sure min/target are below max.
+ // Keep target bitrate if it is set explicitly in encoding config.
+ // Otherwise set target bitrate to 3/4 of the max bitrate
+ // or the one calculated from GetSimulcastConfig() which is larger.
+ layers[i].min_bitrate_bps =
+ std::min(layers[i].min_bitrate_bps, layers[i].max_bitrate_bps);
+ if (encoder_config.simulcast_layers[i].target_bitrate_bps <= 0) {
+ layers[i].target_bitrate_bps = std::max(
+ layers[i].target_bitrate_bps, layers[i].max_bitrate_bps * 3 / 4);
+ }
+ layers[i].target_bitrate_bps = std::max(
+ std::min(layers[i].target_bitrate_bps, layers[i].max_bitrate_bps),
+ layers[i].min_bitrate_bps);
+ }
+ if (i == layers.size() - 1) {
+ is_highest_layer_max_bitrate_configured =
+ encoder_config.simulcast_layers[i].max_bitrate_bps > 0;
+ }
+ }
+ if (!is_screenshare_ && !is_highest_layer_max_bitrate_configured &&
+ encoder_config.max_bitrate_bps > 0) {
+ // No application-configured maximum for the largest layer.
+ // If there is bitrate leftover, give it to the largest layer.
+ BoostMaxSimulcastLayer(
+ webrtc::DataRate::BitsPerSec(encoder_config.max_bitrate_bps), &layers);
+ }
+
+  // Sort the layers by max_bitrate_bps; they might not always be ordered
+  // from smallest to largest.
+ std::vector<size_t> index(layers.size());
+ std::iota(index.begin(), index.end(), 0);
+ std::stable_sort(index.begin(), index.end(), [&layers](size_t a, size_t b) {
+ return layers[a].max_bitrate_bps < layers[b].max_bitrate_bps;
+ });
+
+ if (!layers[index[0]].active) {
+    // Adjust the min bitrate of the first active layer to allow it to go as
+    // low as the lowest (now inactive) layer could. Otherwise, if e.g. a
+    // single HD stream is active, it would have a 600kbps min bitrate, which
+    // would always be allocated to the stream. This would lead to a
+    // congested network, dropped frames and an overall bad experience.
+
+ const int min_configured_bitrate = layers[index[0]].min_bitrate_bps;
+ for (size_t i = 0; i < layers.size(); ++i) {
+ if (layers[index[i]].active) {
+ layers[index[i]].min_bitrate_bps = min_configured_bitrate;
+ break;
+ }
+ }
+ }
+
+ return layers;
+}
+
+webrtc::Resolution
+EncoderStreamFactory::GetLayerResolutionFromRequestedResolution(
+ int frame_width,
+ int frame_height,
+ webrtc::Resolution requested_resolution) const {
+ VideoAdapter adapter(encoder_info_requested_resolution_alignment_);
+ adapter.OnOutputFormatRequest(requested_resolution.ToPair(),
+ requested_resolution.PixelCount(),
+ absl::nullopt);
+ if (restrictions_) {
+ rtc::VideoSinkWants wants;
+ wants.is_active = true;
+ wants.target_pixel_count = restrictions_->target_pixels_per_frame();
+ wants.max_pixel_count =
+ rtc::dchecked_cast<int>(restrictions_->max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
+ wants.resolution_alignment = encoder_info_requested_resolution_alignment_;
+ adapter.OnSinkWants(wants);
+ }
+ int cropped_width, cropped_height;
+ int out_width = 0, out_height = 0;
+ if (!adapter.AdaptFrameResolution(frame_width, frame_height, 0,
+ &cropped_width, &cropped_height, &out_width,
+ &out_height)) {
+ RTC_LOG(LS_ERROR) << "AdaptFrameResolution returned false!";
+ }
+ return {.width = out_width, .height = out_height};
+}
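+
+// Illustrative (not upstream): as exercised by the
+// SinglecastRequestedResolutionWithAdaptation unit test, a requested 640x360
+// combined with a 320 * 320 max-pixels restriction adapts down to 320x180.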
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/video/config/encoder_stream_factory.h b/third_party/libwebrtc/video/config/encoder_stream_factory.h
new file mode 100644
index 0000000000..37abb93876
--- /dev/null
+++ b/third_party/libwebrtc/video/config/encoder_stream_factory.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_CONFIG_ENCODER_STREAM_FACTORY_H_
+#define VIDEO_CONFIG_ENCODER_STREAM_FACTORY_H_
+
+#include <string>
+#include <vector>
+
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/data_rate.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "video/config/video_encoder_config.h"
+
+namespace cricket {
+
+class EncoderStreamFactory
+ : public webrtc::VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+  // Note: this constructor is used by downstream test cases.
+ EncoderStreamFactory(std::string codec_name,
+ int max_qp,
+ bool is_screenshare,
+ bool conference_mode);
+
+ EncoderStreamFactory(std::string codec_name,
+ int max_qp,
+ bool is_screenshare,
+ bool conference_mode,
+ const webrtc::VideoEncoder::EncoderInfo& encoder_info,
+ absl::optional<webrtc::VideoSourceRestrictions>
+ restrictions = absl::nullopt,
+ const webrtc::FieldTrialsView* trials = nullptr);
+
+ std::vector<webrtc::VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const webrtc::VideoEncoderConfig& encoder_config) override;
+
+ private:
+ std::vector<webrtc::VideoStream> CreateDefaultVideoStreams(
+ int width,
+ int height,
+ const webrtc::VideoEncoderConfig& encoder_config,
+ const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const;
+
+ std::vector<webrtc::VideoStream>
+ CreateSimulcastOrConferenceModeScreenshareStreams(
+ int width,
+ int height,
+ const webrtc::VideoEncoderConfig& encoder_config,
+ const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const;
+
+ webrtc::Resolution GetLayerResolutionFromRequestedResolution(
+ int in_frame_width,
+ int in_frame_height,
+ webrtc::Resolution requested_resolution) const;
+
+ const std::string codec_name_;
+ const int max_qp_;
+ const bool is_screenshare_;
+  // Allows a screenshare-specific configuration, which enables temporal
+  // layering and various settings.
+ const bool conference_mode_;
+ const webrtc::FieldTrialBasedConfig fallback_trials_;
+ const webrtc::FieldTrialsView& trials_;
+ const int encoder_info_requested_resolution_alignment_;
+ const absl::optional<webrtc::VideoSourceRestrictions> restrictions_;
+};
+
+} // namespace cricket
+
+#endif // VIDEO_CONFIG_ENCODER_STREAM_FACTORY_H_
diff --git a/third_party/libwebrtc/video/config/encoder_stream_factory_unittest.cc b/third_party/libwebrtc/video/config/encoder_stream_factory_unittest.cc
new file mode 100644
index 0000000000..b37b300c96
--- /dev/null
+++ b/third_party/libwebrtc/video/config/encoder_stream_factory_unittest.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/config/encoder_stream_factory.h"
+
+#include "call/adaptation/video_source_restrictions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using cricket::EncoderStreamFactory;
+constexpr int kMaxQp = 48;
+
+namespace {
+
+std::vector<Resolution> GetStreamResolutions(
+ const std::vector<VideoStream>& streams) {
+ std::vector<Resolution> res;
+ for (const auto& s : streams) {
+ if (s.active) {
+ res.push_back(
+ {rtc::checked_cast<int>(s.width), rtc::checked_cast<int>(s.height)});
+ }
+ }
+ return res;
+}
+
+VideoStream LayerWithRequestedResolution(Resolution res) {
+ VideoStream s;
+ s.requested_resolution = res;
+ return s;
+}
+
+} // namespace
+
+TEST(EncoderStreamFactory, SinglecastRequestedResolution) {
+ VideoEncoder::EncoderInfo encoder_info;
+ auto factory = rtc::make_ref_counted<EncoderStreamFactory>(
+ "VP8", kMaxQp,
+ /* is_screenshare= */ false,
+ /* conference_mode= */ false, encoder_info);
+ VideoEncoderConfig encoder_config;
+ encoder_config.number_of_streams = 1;
+ encoder_config.simulcast_layers.push_back(
+ LayerWithRequestedResolution({.width = 640, .height = 360}));
+ auto streams = factory->CreateEncoderStreams(1280, 720, encoder_config);
+ EXPECT_EQ(streams[0].requested_resolution,
+ (Resolution{.width = 640, .height = 360}));
+ EXPECT_EQ(GetStreamResolutions(streams), (std::vector<Resolution>{
+ {.width = 640, .height = 360},
+ }));
+}
+
+TEST(EncoderStreamFactory, SinglecastRequestedResolutionWithAdaptation) {
+ VideoSourceRestrictions restrictions(
+ /* max_pixels_per_frame= */ (320 * 320),
+ /* target_pixels_per_frame= */ absl::nullopt,
+ /* max_frame_rate= */ absl::nullopt);
+ VideoEncoder::EncoderInfo encoder_info;
+ auto factory = rtc::make_ref_counted<EncoderStreamFactory>(
+ "VP8", kMaxQp,
+ /* is_screenshare= */ false,
+ /* conference_mode= */ false, encoder_info, restrictions);
+ VideoEncoderConfig encoder_config;
+ encoder_config.number_of_streams = 1;
+ encoder_config.simulcast_layers.push_back(
+ LayerWithRequestedResolution({.width = 640, .height = 360}));
+ auto streams = factory->CreateEncoderStreams(1280, 720, encoder_config);
+ EXPECT_EQ(streams[0].requested_resolution,
+ (Resolution{.width = 640, .height = 360}));
+ EXPECT_EQ(GetStreamResolutions(streams), (std::vector<Resolution>{
+ {.width = 320, .height = 180},
+ }));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/config/simulcast.cc b/third_party/libwebrtc/video/config/simulcast.cc
new file mode 100644
index 0000000000..2bd4ac04c3
--- /dev/null
+++ b/third_party/libwebrtc/video/config/simulcast.cc
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/config/simulcast.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/video/video_codec_constants.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/experiments/normalize_simulcast_size_experiment.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+
+namespace cricket {
+
+namespace {
+
+constexpr char kUseLegacySimulcastLayerLimitFieldTrial[] =
+ "WebRTC-LegacySimulcastLayerLimit";
+
+constexpr double kDefaultMaxRoundupRate = 0.1;
+
+// Limits for legacy conference screensharing mode. Currently used for the
+// lower of the two simulcast streams.
+constexpr webrtc::DataRate kScreenshareDefaultTl0Bitrate =
+ webrtc::DataRate::KilobitsPerSec(200);
+constexpr webrtc::DataRate kScreenshareDefaultTl1Bitrate =
+ webrtc::DataRate::KilobitsPerSec(1000);
+
+// Min/max bitrate for the higher one of the two simulcast stream used for
+// screen content.
+constexpr webrtc::DataRate kScreenshareHighStreamMinBitrate =
+ webrtc::DataRate::KilobitsPerSec(600);
+constexpr webrtc::DataRate kScreenshareHighStreamMaxBitrate =
+ webrtc::DataRate::KilobitsPerSec(1250);
+
+constexpr int kDefaultNumTemporalLayers = 3;
+constexpr int kScreenshareMaxSimulcastLayers = 2;
+constexpr int kScreenshareTemporalLayers = 2;
+
+struct SimulcastFormat {
+ int width;
+ int height;
+  // The maximum number of simulcast layers that can be used for
+  // resolutions at `widthxheight`, for legacy applications.
+  size_t max_layers;
+  // The maximum bitrate for encoding a stream at `widthxheight`, when we are
+  // not sending the next higher spatial stream.
+  webrtc::DataRate max_bitrate;
+  // The target bitrate for encoding a stream at `widthxheight`, when this
+  // layer is not the highest layer (i.e., when we are sending another higher
+  // spatial stream).
+  webrtc::DataRate target_bitrate;
+  // The minimum bitrate needed for encoding a stream at `widthxheight`.
+ webrtc::DataRate min_bitrate;
+};
+
+// This table describes, per resolution, how many simulcast layers can be
+// used and at what bitrates (maximum, target, and minimum).
+// Important!! Keep this table ordered from high resolution to low resolution.
+constexpr const SimulcastFormat kSimulcastFormats[] = {
+ {1920, 1080, 3, webrtc::DataRate::KilobitsPerSec(5000),
+ webrtc::DataRate::KilobitsPerSec(4000),
+ webrtc::DataRate::KilobitsPerSec(800)},
+ {1280, 720, 3, webrtc::DataRate::KilobitsPerSec(2500),
+ webrtc::DataRate::KilobitsPerSec(2500),
+ webrtc::DataRate::KilobitsPerSec(600)},
+ {960, 540, 3, webrtc::DataRate::KilobitsPerSec(1200),
+ webrtc::DataRate::KilobitsPerSec(1200),
+ webrtc::DataRate::KilobitsPerSec(350)},
+ {640, 360, 2, webrtc::DataRate::KilobitsPerSec(700),
+ webrtc::DataRate::KilobitsPerSec(500),
+ webrtc::DataRate::KilobitsPerSec(150)},
+ {480, 270, 2, webrtc::DataRate::KilobitsPerSec(450),
+ webrtc::DataRate::KilobitsPerSec(350),
+ webrtc::DataRate::KilobitsPerSec(150)},
+ {320, 180, 1, webrtc::DataRate::KilobitsPerSec(200),
+ webrtc::DataRate::KilobitsPerSec(150),
+ webrtc::DataRate::KilobitsPerSec(30)},
+ // As the resolution goes down, interpolate the target and max bitrates down
+ // towards zero. The min bitrate is still limited at 30 kbps and the target
+ // and the max will be capped from below accordingly.
+ {0, 0, 1, webrtc::DataRate::KilobitsPerSec(0),
+ webrtc::DataRate::KilobitsPerSec(0),
+ webrtc::DataRate::KilobitsPerSec(30)}};
+
+constexpr webrtc::DataRate Interpolate(const webrtc::DataRate& a,
+ const webrtc::DataRate& b,
+ float rate) {
+ return a * (1.0 - rate) + b * rate;
+}
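+
+// Illustrative (not upstream): Interpolate(KilobitsPerSec(700),
+// KilobitsPerSec(450), 0.5) yields 575 kbps, halfway between the 640x360 and
+// 480x270 max bitrates in the table above.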
+
+// TODO(webrtc:12415): Flip this to a kill switch when this feature launches.
+bool EnableLowresBitrateInterpolation(const webrtc::FieldTrialsView& trials) {
+ return absl::StartsWith(
+ trials.Lookup("WebRTC-LowresSimulcastBitrateInterpolation"), "Enabled");
+}
+
+std::vector<SimulcastFormat> GetSimulcastFormats(
+ bool enable_lowres_bitrate_interpolation) {
+ std::vector<SimulcastFormat> formats;
+ formats.insert(formats.begin(), std::begin(kSimulcastFormats),
+ std::end(kSimulcastFormats));
+ if (!enable_lowres_bitrate_interpolation) {
+ RTC_CHECK_GE(formats.size(), 2u);
+ SimulcastFormat& format0x0 = formats[formats.size() - 1];
+ const SimulcastFormat& format_prev = formats[formats.size() - 2];
+ format0x0.max_bitrate = format_prev.max_bitrate;
+ format0x0.target_bitrate = format_prev.target_bitrate;
+ format0x0.min_bitrate = format_prev.min_bitrate;
+ }
+ return formats;
+}
+
+// Multiway: Number of temporal layers for each simulcast stream.
+int DefaultNumberOfTemporalLayers(const webrtc::FieldTrialsView& trials) {
+ const std::string group_name =
+ trials.Lookup("WebRTC-VP8ConferenceTemporalLayers");
+ if (group_name.empty())
+ return kDefaultNumTemporalLayers;
+
+ int num_temporal_layers = kDefaultNumTemporalLayers;
+ if (sscanf(group_name.c_str(), "%d", &num_temporal_layers) == 1 &&
+ num_temporal_layers > 0 &&
+ num_temporal_layers <= webrtc::kMaxTemporalStreams) {
+ return num_temporal_layers;
+ }
+
+ RTC_LOG(LS_WARNING) << "Attempt to set number of temporal layers to "
+ "incorrect value: "
+ << group_name;
+
+ return kDefaultNumTemporalLayers;
+}
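+
+// Illustrative (not upstream): a "WebRTC-VP8ConferenceTemporalLayers" group
+// of "2" selects two temporal layers; values outside
+// (0, webrtc::kMaxTemporalStreams], such as "0", log the warning above and
+// fall back to the default of 3.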
+
+int FindSimulcastFormatIndex(int width,
+ int height,
+ bool enable_lowres_bitrate_interpolation) {
+ RTC_DCHECK_GE(width, 0);
+ RTC_DCHECK_GE(height, 0);
+ const auto formats = GetSimulcastFormats(enable_lowres_bitrate_interpolation);
+ for (uint32_t i = 0; i < formats.size(); ++i) {
+ if (width * height >= formats[i].width * formats[i].height) {
+ return i;
+ }
+ }
+ RTC_DCHECK_NOTREACHED();
+ return -1;
+}
+
+} // namespace
+
+// Round size to the nearest simulcast-friendly size.
+// Simulcast stream width and height must both be divisible by
+// |2 ^ (simulcast_layers - 1)|.
+int NormalizeSimulcastSize(int size, size_t simulcast_layers) {
+ int base2_exponent = static_cast<int>(simulcast_layers) - 1;
+ const absl::optional<int> experimental_base2_exponent =
+ webrtc::NormalizeSimulcastSizeExperiment::GetBase2Exponent();
+ if (experimental_base2_exponent &&
+ (size > (1 << *experimental_base2_exponent))) {
+ base2_exponent = *experimental_base2_exponent;
+ }
+ return ((size >> base2_exponent) << base2_exponent);
+}
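+
+// Illustrative (not upstream): with 3 simulcast layers the base2 exponent is
+// 2, so NormalizeSimulcastSize(1283, 3) clears the two low bits and returns
+// 1280, keeping all layer widths (1280, 640, 320) integral.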
+
+SimulcastFormat InterpolateSimulcastFormat(
+ int width,
+ int height,
+ absl::optional<double> max_roundup_rate,
+ bool enable_lowres_bitrate_interpolation) {
+ const auto formats = GetSimulcastFormats(enable_lowres_bitrate_interpolation);
+ const int index = FindSimulcastFormatIndex(
+ width, height, enable_lowres_bitrate_interpolation);
+ if (index == 0)
+ return formats[index];
+ const int total_pixels_up =
+ formats[index - 1].width * formats[index - 1].height;
+ const int total_pixels_down = formats[index].width * formats[index].height;
+ const int total_pixels = width * height;
+ const float rate = (total_pixels_up - total_pixels) /
+ static_cast<float>(total_pixels_up - total_pixels_down);
+
+ // Use upper resolution if `rate` is below the configured threshold.
+ size_t max_layers = (rate < max_roundup_rate.value_or(kDefaultMaxRoundupRate))
+ ? formats[index - 1].max_layers
+ : formats[index].max_layers;
+ webrtc::DataRate max_bitrate = Interpolate(formats[index - 1].max_bitrate,
+ formats[index].max_bitrate, rate);
+ webrtc::DataRate target_bitrate = Interpolate(
+ formats[index - 1].target_bitrate, formats[index].target_bitrate, rate);
+ webrtc::DataRate min_bitrate = Interpolate(formats[index - 1].min_bitrate,
+ formats[index].min_bitrate, rate);
+
+ return {width, height, max_layers, max_bitrate, target_bitrate, min_bitrate};
+}
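+
+// Worked example (illustrative, not upstream): for 800x450 (360000 pixels)
+// the neighbouring table entries are 960x540 (518400 pixels) and 640x360
+// (230400 pixels), so rate = (518400 - 360000) / (518400 - 230400) = 0.55.
+// The max bitrate interpolates to 1200 * 0.45 + 700 * 0.55 = 925 kbps, and
+// max_layers comes from the lower entry (2), since 0.55 exceeds the default
+// 0.1 round-up threshold.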
+
+SimulcastFormat InterpolateSimulcastFormat(
+ int width,
+ int height,
+ bool enable_lowres_bitrate_interpolation) {
+ return InterpolateSimulcastFormat(width, height, absl::nullopt,
+ enable_lowres_bitrate_interpolation);
+}
+
+webrtc::DataRate FindSimulcastMaxBitrate(
+ int width,
+ int height,
+ bool enable_lowres_bitrate_interpolation) {
+ return InterpolateSimulcastFormat(width, height,
+ enable_lowres_bitrate_interpolation)
+ .max_bitrate;
+}
+
+webrtc::DataRate FindSimulcastTargetBitrate(
+ int width,
+ int height,
+ bool enable_lowres_bitrate_interpolation) {
+ return InterpolateSimulcastFormat(width, height,
+ enable_lowres_bitrate_interpolation)
+ .target_bitrate;
+}
+
+webrtc::DataRate FindSimulcastMinBitrate(
+ int width,
+ int height,
+ bool enable_lowres_bitrate_interpolation) {
+ return InterpolateSimulcastFormat(width, height,
+ enable_lowres_bitrate_interpolation)
+ .min_bitrate;
+}
+
+void BoostMaxSimulcastLayer(webrtc::DataRate max_bitrate,
+ std::vector<webrtc::VideoStream>* layers) {
+ if (layers->empty())
+ return;
+
+ const webrtc::DataRate total_bitrate = GetTotalMaxBitrate(*layers);
+
+ // We're still not using all available bits.
+ if (total_bitrate < max_bitrate) {
+ // Spend additional bits to boost the max layer.
+ const webrtc::DataRate bitrate_left = max_bitrate - total_bitrate;
+ layers->back().max_bitrate_bps += bitrate_left.bps();
+ }
+}
+
+webrtc::DataRate GetTotalMaxBitrate(
+ const std::vector<webrtc::VideoStream>& layers) {
+ if (layers.empty())
+ return webrtc::DataRate::Zero();
+
+ int total_max_bitrate_bps = 0;
+ for (size_t s = 0; s < layers.size() - 1; ++s) {
+ total_max_bitrate_bps += layers[s].target_bitrate_bps;
+ }
+ total_max_bitrate_bps += layers.back().max_bitrate_bps;
+ return webrtc::DataRate::BitsPerSec(total_max_bitrate_bps);
+}
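+
+// Illustrative (not upstream): for target bitrates of 100 and 200 kbps on
+// the two lower layers and a 400 kbps max on the top layer, the total is
+// 100 + 200 + 400 = 700 kbps: lower layers are assumed to run at their
+// target rate while the top layer may use its maximum.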
+
+size_t LimitSimulcastLayerCount(int width,
+ int height,
+ size_t need_layers,
+ size_t layer_count,
+ const webrtc::FieldTrialsView& trials) {
+ if (!absl::StartsWith(trials.Lookup(kUseLegacySimulcastLayerLimitFieldTrial),
+ "Disabled")) {
+    // The max layer count from the next higher resolution in
+    // kSimulcastFormats will be used if the ratio (pixels_up - pixels) /
+    // (pixels_up - pixels_down) is less than the configured `max_ratio`.
+    // pixels_down is the pixel count of the entry selected in
+    // kSimulcastFormats based on pixels.
+ webrtc::FieldTrialOptional<double> max_ratio("max_ratio");
+ webrtc::ParseFieldTrial({&max_ratio},
+ trials.Lookup("WebRTC-SimulcastLayerLimitRoundUp"));
+
+ const bool enable_lowres_bitrate_interpolation =
+ EnableLowresBitrateInterpolation(trials);
+ size_t adaptive_layer_count = std::max(
+ need_layers,
+ InterpolateSimulcastFormat(width, height, max_ratio.GetOptional(),
+ enable_lowres_bitrate_interpolation)
+ .max_layers);
+ if (layer_count > adaptive_layer_count) {
+ RTC_LOG(LS_WARNING) << "Reducing simulcast layer count from "
+ << layer_count << " to " << adaptive_layer_count;
+ layer_count = adaptive_layer_count;
+ }
+ }
+ return layer_count;
+}
+
+std::vector<webrtc::VideoStream> GetSimulcastConfig(
+ size_t min_layers,
+ size_t max_layers,
+ int width,
+ int height,
+ double bitrate_priority,
+ int max_qp,
+ bool is_screenshare_with_conference_mode,
+ bool temporal_layers_supported,
+ const webrtc::FieldTrialsView& trials) {
+ RTC_DCHECK_LE(min_layers, max_layers);
+ RTC_DCHECK(max_layers > 1 || is_screenshare_with_conference_mode);
+
+ const bool base_heavy_tl3_rate_alloc =
+ webrtc::RateControlSettings::ParseFromKeyValueConfig(&trials)
+ .Vp8BaseHeavyTl3RateAllocation();
+ if (is_screenshare_with_conference_mode) {
+ return GetScreenshareLayers(max_layers, width, height, bitrate_priority,
+ max_qp, temporal_layers_supported,
+ base_heavy_tl3_rate_alloc, trials);
+ } else {
+    // Some applications rely on the old behavior of automatically limiting
+    // the simulcast layer count based on the resolution; until they update,
+    // they can keep it via the WebRTC-LegacySimulcastLayerLimit field trial.
+ max_layers =
+ LimitSimulcastLayerCount(width, height, min_layers, max_layers, trials);
+
+ return GetNormalSimulcastLayers(max_layers, width, height, bitrate_priority,
+ max_qp, temporal_layers_supported,
+ base_heavy_tl3_rate_alloc, trials);
+ }
+}
+
+std::vector<webrtc::VideoStream> GetNormalSimulcastLayers(
+ size_t layer_count,
+ int width,
+ int height,
+ double bitrate_priority,
+ int max_qp,
+ bool temporal_layers_supported,
+ bool base_heavy_tl3_rate_alloc,
+ const webrtc::FieldTrialsView& trials) {
+ std::vector<webrtc::VideoStream> layers(layer_count);
+
+ const bool enable_lowres_bitrate_interpolation =
+ EnableLowresBitrateInterpolation(trials);
+
+  // Format width and height have to be divisible by
+  // |2 ^ (num_simulcast_layers - 1)|.
+ width = NormalizeSimulcastSize(width, layer_count);
+ height = NormalizeSimulcastSize(height, layer_count);
+ // Add simulcast streams, from highest resolution (`s` = num_simulcast_layers
+ // -1) to lowest resolution at `s` = 0.
+ for (size_t s = layer_count - 1;; --s) {
+ layers[s].width = width;
+ layers[s].height = height;
+ // TODO(pbos): Fill actual temporal-layer bitrate thresholds.
+ layers[s].max_qp = max_qp;
+ layers[s].num_temporal_layers =
+ temporal_layers_supported ? DefaultNumberOfTemporalLayers(trials) : 1;
+ layers[s].max_bitrate_bps =
+ FindSimulcastMaxBitrate(width, height,
+ enable_lowres_bitrate_interpolation)
+ .bps();
+ layers[s].target_bitrate_bps =
+ FindSimulcastTargetBitrate(width, height,
+ enable_lowres_bitrate_interpolation)
+ .bps();
+ int num_temporal_layers = DefaultNumberOfTemporalLayers(trials);
+ if (s == 0) {
+ // If alternative temporal rate allocation is selected, adjust the
+ // bitrate of the lowest simulcast stream so that absolute bitrate for
+ // the base temporal layer matches the bitrate for the base temporal
+ // layer with the default 3 simulcast streams. Otherwise we risk a
+ // higher threshold for receiving a feed at all.
+ float rate_factor = 1.0;
+ if (num_temporal_layers == 3) {
+ if (base_heavy_tl3_rate_alloc) {
+ // Base heavy allocation increases TL0 bitrate from 40% to 60%.
+ rate_factor = 0.4 / 0.6;
+ }
+ } else {
+ rate_factor =
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ 3, 0, /*base_heavy_tl3_rate_alloc=*/false) /
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ num_temporal_layers, 0, /*base_heavy_tl3_rate_alloc=*/false);
+ }
+
+ layers[s].max_bitrate_bps =
+ static_cast<int>(layers[s].max_bitrate_bps * rate_factor);
+ layers[s].target_bitrate_bps =
+ static_cast<int>(layers[s].target_bitrate_bps * rate_factor);
+ }
+ layers[s].min_bitrate_bps =
+ FindSimulcastMinBitrate(width, height,
+ enable_lowres_bitrate_interpolation)
+ .bps();
+
+ // Ensure consistency.
+ layers[s].max_bitrate_bps =
+ std::max(layers[s].min_bitrate_bps, layers[s].max_bitrate_bps);
+ layers[s].target_bitrate_bps =
+ std::max(layers[s].min_bitrate_bps, layers[s].target_bitrate_bps);
+
+ layers[s].max_framerate = kDefaultVideoMaxFramerate;
+
+ width /= 2;
+ height /= 2;
+
+ if (s == 0) {
+ break;
+ }
+ }
+ // Currently the relative bitrate priority of the sender is controlled by
+ // the value of the lowest VideoStream.
+ // TODO(bugs.webrtc.org/8630): The web specification describes being able to
+ // control relative bitrate for each individual simulcast layer, but this
+ // is currently just implemented per rtp sender.
+ layers[0].bitrate_priority = bitrate_priority;
+ return layers;
+}
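+
+// Note on rate_factor above (illustrative, not upstream): with base-heavy
+// allocation TL0 receives 60% of the stream bitrate instead of the default
+// 40%, so scaling the whole stream by 0.4 / 0.6 keeps the absolute TL0
+// bitrate unchanged: 0.6 * (B * 0.4 / 0.6) == 0.4 * B.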
+
+std::vector<webrtc::VideoStream> GetScreenshareLayers(
+ size_t max_layers,
+ int width,
+ int height,
+ double bitrate_priority,
+ int max_qp,
+ bool temporal_layers_supported,
+ bool base_heavy_tl3_rate_alloc,
+ const webrtc::FieldTrialsView& trials) {
+ size_t num_simulcast_layers =
+ std::min<int>(max_layers, kScreenshareMaxSimulcastLayers);
+
+ std::vector<webrtc::VideoStream> layers(num_simulcast_layers);
+  // For legacy screenshare in conference mode, tl0 and tl1 bitrates are
+  // piggybacked on the VideoCodec struct as target and max bitrates,
+  // respectively. See e.g. webrtc::LibvpxVp8Encoder::SetRates().
+ layers[0].width = width;
+ layers[0].height = height;
+ layers[0].max_qp = max_qp;
+ layers[0].max_framerate = 5;
+ layers[0].min_bitrate_bps = webrtc::kDefaultMinVideoBitrateBps;
+ layers[0].target_bitrate_bps = kScreenshareDefaultTl0Bitrate.bps();
+ layers[0].max_bitrate_bps = kScreenshareDefaultTl1Bitrate.bps();
+ layers[0].num_temporal_layers = temporal_layers_supported ? 2 : 1;
+
+ // With simulcast enabled, add another spatial layer. This one will have a
+ // more normal layout, with the regular 3 temporal layer pattern and no fps
+ // restrictions. The base simulcast layer will still use legacy setup.
+ if (num_simulcast_layers == kScreenshareMaxSimulcastLayers) {
+ // Add optional upper simulcast layer.
+ int max_bitrate_bps;
+ bool using_boosted_bitrate = false;
+ if (!temporal_layers_supported) {
+ // Set the max bitrate to where the base layer would have been if temporal
+ // layers were enabled.
+ max_bitrate_bps = static_cast<int>(
+ kScreenshareHighStreamMaxBitrate.bps() *
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ kScreenshareTemporalLayers, 0, base_heavy_tl3_rate_alloc));
+ } else {
+ // Experimental temporal layer mode used, use increased max bitrate.
+ max_bitrate_bps = kScreenshareHighStreamMaxBitrate.bps();
+ using_boosted_bitrate = true;
+ }
+
+ layers[1].width = width;
+ layers[1].height = height;
+ layers[1].max_qp = max_qp;
+ layers[1].max_framerate = kDefaultVideoMaxFramerate;
+ layers[1].num_temporal_layers =
+ temporal_layers_supported ? kScreenshareTemporalLayers : 1;
+ layers[1].min_bitrate_bps = using_boosted_bitrate
+ ? kScreenshareHighStreamMinBitrate.bps()
+ : layers[0].target_bitrate_bps * 2;
+ layers[1].target_bitrate_bps = max_bitrate_bps;
+ layers[1].max_bitrate_bps = max_bitrate_bps;
+ }
+
+  // The bitrate priority is currently implemented on a per-sender level, so
+  // we just set it for the first simulcast layer.
+ layers[0].bitrate_priority = bitrate_priority;
+ return layers;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/video/config/simulcast.h b/third_party/libwebrtc/video/config/simulcast.h
new file mode 100644
index 0000000000..32af168bcd
--- /dev/null
+++ b/third_party/libwebrtc/video/config/simulcast.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_CONFIG_SIMULCAST_H_
+#define VIDEO_CONFIG_SIMULCAST_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/units/data_rate.h"
+#include "video/config/video_encoder_config.h"
+
+namespace cricket {
+
+// Gets the total maximum bitrate for the `streams`.
+webrtc::DataRate GetTotalMaxBitrate(
+ const std::vector<webrtc::VideoStream>& streams);
+
+// Adds any portion of `max_bitrate` above the total maximum bitrate of the
+// `layers` to the highest quality layer.
+void BoostMaxSimulcastLayer(webrtc::DataRate max_bitrate,
+ std::vector<webrtc::VideoStream>* layers);
+
+// Rounds `size` down to a simulcast-friendly size (by default, one divisible
+// by `2 ^ (simulcast_layers - 1)`).
+int NormalizeSimulcastSize(int size, size_t simulcast_layers);
+
+// Gets simulcast settings.
+std::vector<webrtc::VideoStream> GetSimulcastConfig(
+ size_t min_layers,
+ size_t max_layers,
+ int width,
+ int height,
+ double bitrate_priority,
+ int max_qp,
+ bool is_screenshare_with_conference_mode,
+ bool temporal_layers_supported,
+ const webrtc::FieldTrialsView& trials);
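+
+// A minimal usage sketch (values are illustrative; they mirror the defaults
+// exercised by simulcast_unittest.cc rather than any required configuration):
+//
+//   webrtc::FieldTrialBasedConfig trials;
+//   std::vector<webrtc::VideoStream> streams = cricket::GetSimulcastConfig(
+//       /*min_layers=*/1, /*max_layers=*/3, /*width=*/1280, /*height=*/720,
+//       /*bitrate_priority=*/1.0, /*max_qp=*/55,
+//       /*is_screenshare_with_conference_mode=*/false,
+//       /*temporal_layers_supported=*/true, trials);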
+
+// Gets the simulcast config layers for a non-screensharing case.
+std::vector<webrtc::VideoStream> GetNormalSimulcastLayers(
+ size_t max_layers,
+ int width,
+ int height,
+ double bitrate_priority,
+ int max_qp,
+ bool temporal_layers_supported,
+ bool base_heavy_tl3_rate_alloc,
+ const webrtc::FieldTrialsView& trials);
+
+// Gets simulcast config layers for screenshare settings.
+std::vector<webrtc::VideoStream> GetScreenshareLayers(
+ size_t max_layers,
+ int width,
+ int height,
+ double bitrate_priority,
+ int max_qp,
+ bool temporal_layers_supported,
+ bool base_heavy_tl3_rate_alloc,
+ const webrtc::FieldTrialsView& trials);
+
+} // namespace cricket
+
+#endif // VIDEO_CONFIG_SIMULCAST_H_
diff --git a/third_party/libwebrtc/video/config/simulcast_unittest.cc b/third_party/libwebrtc/video/config/simulcast_unittest.cc
new file mode 100644
index 0000000000..152a0f9525
--- /dev/null
+++ b/third_party/libwebrtc/video/config/simulcast_unittest.cc
@@ -0,0 +1,525 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/config/simulcast.h"
+
+#include "api/transport/field_trial_based_config.h"
+#include "media/base/media_constants.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kQpMax = 55;
+constexpr double kBitratePriority = 2.0;
+constexpr bool kScreenshare = true;
+constexpr int kDefaultTemporalLayers = 3; // Value from simulcast.cc.
+
+// Values from kSimulcastConfigs in simulcast.cc.
+const std::vector<VideoStream> GetSimulcastBitrates720p() {
+ std::vector<VideoStream> streams(3);
+ streams[0].min_bitrate_bps = 30000;
+ streams[0].target_bitrate_bps = 150000;
+ streams[0].max_bitrate_bps = 200000;
+ streams[1].min_bitrate_bps = 150000;
+ streams[1].target_bitrate_bps = 500000;
+ streams[1].max_bitrate_bps = 700000;
+ streams[2].min_bitrate_bps = 600000;
+ streams[2].target_bitrate_bps = 2500000;
+ streams[2].max_bitrate_bps = 2500000;
+ return streams;
+}
+} // namespace
+
+TEST(SimulcastTest, TotalMaxBitrateIsZeroForNoStreams) {
+ std::vector<VideoStream> streams;
+ EXPECT_EQ(0, cricket::GetTotalMaxBitrate(streams).bps());
+}
+
+TEST(SimulcastTest, GetTotalMaxBitrateForSingleStream) {
+ std::vector<VideoStream> streams(1);
+ streams[0].max_bitrate_bps = 100000;
+ EXPECT_EQ(100000, cricket::GetTotalMaxBitrate(streams).bps());
+}
+
+TEST(SimulcastTest, GetTotalMaxBitrateForMultipleStreams) {
+ std::vector<VideoStream> streams(3);
+ streams[0].target_bitrate_bps = 100000;
+ streams[1].target_bitrate_bps = 200000;
+ streams[2].max_bitrate_bps = 400000;
+ EXPECT_EQ(700000, cricket::GetTotalMaxBitrate(streams).bps());
+}
+
+TEST(SimulcastTest, BandwidthAboveTotalMaxBitrateGivenToHighestStream) {
+ std::vector<VideoStream> streams(3);
+ streams[0].target_bitrate_bps = 100000;
+ streams[1].target_bitrate_bps = 200000;
+ streams[2].max_bitrate_bps = 400000;
+
+ const webrtc::DataRate one_bps = webrtc::DataRate::BitsPerSec(1);
+
+ // No bitrate above the total max to give to the highest stream.
+ const webrtc::DataRate max_total_bitrate =
+ cricket::GetTotalMaxBitrate(streams);
+ cricket::BoostMaxSimulcastLayer(max_total_bitrate, &streams);
+ EXPECT_EQ(400000, streams[2].max_bitrate_bps);
+ EXPECT_EQ(max_total_bitrate, cricket::GetTotalMaxBitrate(streams));
+
+ // The bitrate above the total max should be given to the highest stream.
+ cricket::BoostMaxSimulcastLayer(max_total_bitrate + one_bps, &streams);
+ EXPECT_EQ(400000 + 1, streams[2].max_bitrate_bps);
+ EXPECT_EQ(max_total_bitrate + one_bps, cricket::GetTotalMaxBitrate(streams));
+}
+
+TEST(SimulcastTest, GetConfig) {
+ const std::vector<VideoStream> kExpected = GetSimulcastBitrates720p();
+ const FieldTrialBasedConfig trials;
+
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 1280, 720, kBitratePriority, kQpMax,
+ !kScreenshare, true, trials);
+
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(320u, streams[0].width);
+ EXPECT_EQ(180u, streams[0].height);
+ EXPECT_EQ(640u, streams[1].width);
+ EXPECT_EQ(360u, streams[1].height);
+ EXPECT_EQ(1280u, streams[2].width);
+ EXPECT_EQ(720u, streams[2].height);
+
+ for (size_t i = 0; i < streams.size(); ++i) {
+ EXPECT_EQ(size_t{kDefaultTemporalLayers}, streams[i].num_temporal_layers);
+ EXPECT_EQ(cricket::kDefaultVideoMaxFramerate, streams[i].max_framerate);
+ EXPECT_EQ(kQpMax, streams[i].max_qp);
+ EXPECT_EQ(kExpected[i].min_bitrate_bps, streams[i].min_bitrate_bps);
+ EXPECT_EQ(kExpected[i].target_bitrate_bps, streams[i].target_bitrate_bps);
+ EXPECT_EQ(kExpected[i].max_bitrate_bps, streams[i].max_bitrate_bps);
+ EXPECT_TRUE(streams[i].active);
+ }
+ // Currently set on lowest stream.
+ EXPECT_EQ(kBitratePriority, streams[0].bitrate_priority);
+ EXPECT_FALSE(streams[1].bitrate_priority);
+ EXPECT_FALSE(streams[2].bitrate_priority);
+}
+
+TEST(SimulcastTest, GetConfigWithBaseHeavyVP8TL3RateAllocation) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-UseBaseHeavyVP8TL3RateAllocation/Enabled/");
+ FieldTrialBasedConfig trials;
+
+ const std::vector<VideoStream> kExpected = GetSimulcastBitrates720p();
+
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 1280, 720, kBitratePriority, kQpMax,
+ !kScreenshare, true, trials);
+
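+  // With base-heavy allocation the base temporal layer receives 60% of the
+  // stream bitrate instead of the default 40%, so the lowest stream's target
+  // and max bitrates are scaled by 0.4 / 0.6, presumably so that the base
+  // layer's absolute bitrate stays the same as with the default allocation.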
+ EXPECT_EQ(kExpected[0].min_bitrate_bps, streams[0].min_bitrate_bps);
+ EXPECT_EQ(static_cast<int>(0.4 * kExpected[0].target_bitrate_bps / 0.6),
+ streams[0].target_bitrate_bps);
+ EXPECT_EQ(static_cast<int>(0.4 * kExpected[0].max_bitrate_bps / 0.6),
+ streams[0].max_bitrate_bps);
+ for (size_t i = 1; i < streams.size(); ++i) {
+ EXPECT_EQ(kExpected[i].min_bitrate_bps, streams[i].min_bitrate_bps);
+ EXPECT_EQ(kExpected[i].target_bitrate_bps, streams[i].target_bitrate_bps);
+ EXPECT_EQ(kExpected[i].max_bitrate_bps, streams[i].max_bitrate_bps);
+ }
+}
+
+TEST(SimulcastTest, GetConfigWithLimitedMaxLayers) {
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 2;
+ FieldTrialBasedConfig trials;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 1280, 720, kBitratePriority, kQpMax,
+ !kScreenshare, true, trials);
+
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(640u, streams[0].width);
+ EXPECT_EQ(360u, streams[0].height);
+ EXPECT_EQ(1280u, streams[1].width);
+ EXPECT_EQ(720u, streams[1].height);
+}
+
+TEST(SimulcastTest, GetConfigWithLimitedMaxLayersForResolution) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LegacySimulcastLayerLimit/Enabled/");
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 800, 600, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+
+ EXPECT_EQ(2u, streams.size());
+ EXPECT_EQ(400u, streams[0].width);
+ EXPECT_EQ(300u, streams[0].height);
+ EXPECT_EQ(800u, streams[1].width);
+ EXPECT_EQ(600u, streams[1].height);
+}
+
+TEST(SimulcastTest, GetConfigWithLowResolutionScreenshare) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LegacySimulcastLayerLimit/Enabled/");
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 100, 100, kBitratePriority, kQpMax, kScreenshare,
+ true, trials);
+
+  // The number of simulcast streams is never reduced for screenshare,
+  // even at very low resolutions.
+ EXPECT_GT(streams.size(), 1u);
+}
+
+TEST(SimulcastTest, GetConfigWithNotLimitedMaxLayersForResolution) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LegacySimulcastLayerLimit/Disabled/");
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 800, 600, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(200u, streams[0].width);
+ EXPECT_EQ(150u, streams[0].height);
+ EXPECT_EQ(400u, streams[1].width);
+ EXPECT_EQ(300u, streams[1].height);
+ EXPECT_EQ(800u, streams[2].width);
+ EXPECT_EQ(600u, streams[2].height);
+}
+
+TEST(SimulcastTest, GetConfigWithNormalizedResolution) {
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 2;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 640 + 1, 360 + 1, kBitratePriority, kQpMax,
+ !kScreenshare, true, trials);
+
+ // Must be divisible by |2 ^ (num_layers - 1)|.
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(320u, streams[0].width);
+ EXPECT_EQ(180u, streams[0].height);
+ EXPECT_EQ(640u, streams[1].width);
+ EXPECT_EQ(360u, streams[1].height);
+}
+
+TEST(SimulcastTest, GetConfigWithNormalizedResolutionDivisibleBy4) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-NormalizeSimulcastResolution/Enabled-2/");
+ FieldTrialBasedConfig trials;
+
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 2;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 709, 501, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+
+ // Must be divisible by |2 ^ 2|.
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(354u, streams[0].width);
+ EXPECT_EQ(250u, streams[0].height);
+ EXPECT_EQ(708u, streams[1].width);
+ EXPECT_EQ(500u, streams[1].height);
+}
+
+TEST(SimulcastTest, GetConfigWithNormalizedResolutionDivisibleBy8) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-NormalizeSimulcastResolution/Enabled-3/");
+ FieldTrialBasedConfig trials;
+
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 2;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 709, 501, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+
+ // Must be divisible by |2 ^ 3|.
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(352u, streams[0].width);
+ EXPECT_EQ(248u, streams[0].height);
+ EXPECT_EQ(704u, streams[1].width);
+ EXPECT_EQ(496u, streams[1].height);
+}
+
+TEST(SimulcastTest, GetConfigForLegacyLayerLimit) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LegacySimulcastLayerLimit/Enabled/");
+ FieldTrialBasedConfig trials;
+
+ const size_t kMinLayers = 1;
+ const int kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 320, 180, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(1u, streams.size());
+
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 640, 360,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 1920, 1080,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+}
+
+TEST(SimulcastTest, GetConfigForLegacyLayerLimitWithRequiredHD) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LegacySimulcastLayerLimit/Enabled/");
+ FieldTrialBasedConfig trials;
+
+ const size_t kMinLayers = 3; // "HD" layer must be present!
+ const int kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 320, 180, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 640, 360,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 1920, 1080,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+}
+
+TEST(SimulcastTest, GetConfigForScreenshareSimulcast) {
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 1400, 800, kBitratePriority, kQpMax, kScreenshare,
+ true, trials);
+
+ EXPECT_GT(streams.size(), 1u);
+ for (size_t i = 0; i < streams.size(); ++i) {
+ EXPECT_EQ(1400u, streams[i].width) << "Screen content never scaled.";
+ EXPECT_EQ(800u, streams[i].height) << "Screen content never scaled.";
+ EXPECT_EQ(kQpMax, streams[i].max_qp);
+ EXPECT_TRUE(streams[i].active);
+ EXPECT_GT(streams[i].num_temporal_layers, size_t{1});
+ EXPECT_GT(streams[i].max_framerate, 0);
+ EXPECT_GT(streams[i].min_bitrate_bps, 0);
+ EXPECT_GT(streams[i].target_bitrate_bps, streams[i].min_bitrate_bps);
+ EXPECT_GE(streams[i].max_bitrate_bps, streams[i].target_bitrate_bps);
+ }
+}
+
+TEST(SimulcastTest, GetConfigForScreenshareSimulcastWithLimitedMaxLayers) {
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 1;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 1400, 800, kBitratePriority, kQpMax, kScreenshare,
+ true, trials);
+
+ EXPECT_EQ(kMaxLayers, streams.size());
+}
+
+TEST(SimulcastTest, AveragesBitratesForNonStandardResolution) {
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, 900, 800, kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(900u, streams[2].width);
+ EXPECT_EQ(800u, streams[2].height);
+ EXPECT_EQ(1850000, streams[2].max_bitrate_bps);
+ EXPECT_EQ(1850000, streams[2].target_bitrate_bps);
+ EXPECT_EQ(475000, streams[2].min_bitrate_bps);
+}
+
+TEST(SimulcastTest, BitratesForCloseToStandardResolution) {
+ const size_t kMinLayers = 1;
+ const size_t kMaxLayers = 3;
+  // Resolution very close to 720p in number of pixels.
+ const size_t kWidth = 1280;
+ const size_t kHeight = 716;
+ const std::vector<VideoStream> kExpectedNear = GetSimulcastBitrates720p();
+ FieldTrialBasedConfig trials;
+
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ kMinLayers, kMaxLayers, kWidth, kHeight, kBitratePriority, kQpMax,
+ !kScreenshare, true, trials);
+
+ EXPECT_EQ(kMaxLayers, streams.size());
+ EXPECT_EQ(kWidth, streams[2].width);
+ EXPECT_EQ(kHeight, streams[2].height);
+ for (size_t i = 0; i < streams.size(); ++i) {
+ EXPECT_NEAR(kExpectedNear[i].max_bitrate_bps, streams[i].max_bitrate_bps,
+ 20000);
+ EXPECT_NEAR(kExpectedNear[i].target_bitrate_bps,
+ streams[i].target_bitrate_bps, 20000);
+ EXPECT_NEAR(kExpectedNear[i].min_bitrate_bps, streams[i].min_bitrate_bps,
+ 20000);
+ }
+}
+
+TEST(SimulcastTest, MaxLayersWithRoundUpDisabled) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-SimulcastLayerLimitRoundUp/max_ratio:0.0/");
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const int kMaxLayers = 3;
+
+ std::vector<VideoStream> streams;
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 960, 540,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+ // <960x540: 2 layers
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 960, 539,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 270,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+ // <480x270: 1 layer
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 269,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(1u, streams.size());
+}
+
+TEST(SimulcastTest, MaxLayersWithDefaultRoundUpRatio) {
+ // Default: "WebRTC-SimulcastLayerLimitRoundUp/max_ratio:0.1/"
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const int kMaxLayers = 3;
+
+ std::vector<VideoStream> streams;
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 960, 540,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+  // Lowest cropped height that uses the higher resolution's layer count.
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 960, 512,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(3u, streams.size());
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 960, 508,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 270,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+  // Lowest cropped height that uses the higher resolution's layer count.
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 256,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 254,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(1u, streams.size());
+}
+
+TEST(SimulcastTest, MaxLayersWithRoundUpRatio) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-SimulcastLayerLimitRoundUp/max_ratio:0.13/");
+ FieldTrialBasedConfig trials;
+ const size_t kMinLayers = 1;
+ const int kMaxLayers = 3;
+
+ std::vector<VideoStream> streams;
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 270,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+  // Lowest cropped height that uses the higher resolution's layer count.
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 252,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(2u, streams.size());
+ streams = cricket::GetSimulcastConfig(kMinLayers, kMaxLayers, 480, 250,
+ kBitratePriority, kQpMax, !kScreenshare,
+ true, trials);
+ EXPECT_EQ(1u, streams.size());
+}
+
+TEST(SimulcastTest, BitratesInterpolatedForResBelow180p) {
+ // TODO(webrtc:12415): Remove when feature launches.
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LowresSimulcastBitrateInterpolation/Enabled/");
+
+ const size_t kMaxLayers = 3;
+ FieldTrialBasedConfig trials;
+
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ /* min_layers = */ 1, kMaxLayers, /* width = */ 960, /* height = */ 540,
+ kBitratePriority, kQpMax, !kScreenshare, true, trials);
+
+ ASSERT_EQ(streams.size(), kMaxLayers);
+ EXPECT_EQ(240u, streams[0].width);
+ EXPECT_EQ(135u, streams[0].height);
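+  // 240x135 has 0.5625x the pixels of the 320x180 table entry, and the rates
+  // below match scaling that entry's max/target (200000/150000 bps) linearly
+  // in pixel count, with the min held at the 30000 bps floor rather than
+  // interpolated.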
+ EXPECT_EQ(streams[0].max_bitrate_bps, 112500);
+ EXPECT_EQ(streams[0].target_bitrate_bps, 84375);
+ EXPECT_EQ(streams[0].min_bitrate_bps, 30000);
+}
+
+TEST(SimulcastTest, BitratesConsistentForVerySmallRes) {
+ // TODO(webrtc:12415): Remove when feature launches.
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LowresSimulcastBitrateInterpolation/Enabled/");
+
+ FieldTrialBasedConfig trials;
+
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ /* min_layers = */ 1, /* max_layers = */ 3, /* width = */ 1,
+ /* height = */ 1, kBitratePriority, kQpMax, !kScreenshare, true, trials);
+
+ ASSERT_TRUE(!streams.empty());
+ EXPECT_EQ(1u, streams[0].width);
+ EXPECT_EQ(1u, streams[0].height);
+ EXPECT_EQ(streams[0].max_bitrate_bps, 30000);
+ EXPECT_EQ(streams[0].target_bitrate_bps, 30000);
+ EXPECT_EQ(streams[0].min_bitrate_bps, 30000);
+}
+
+TEST(SimulcastTest,
+ BitratesNotInterpolatedForResBelow180pWhenDisabledTrialSet) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LowresSimulcastBitrateInterpolation/Disabled/");
+
+ const size_t kMaxLayers = 3;
+ FieldTrialBasedConfig trials;
+
+ std::vector<VideoStream> streams = cricket::GetSimulcastConfig(
+ /* min_layers = */ 1, kMaxLayers, /* width = */ 960, /* height = */ 540,
+ kBitratePriority, kQpMax, !kScreenshare, true, trials);
+
+ ASSERT_EQ(streams.size(), kMaxLayers);
+ EXPECT_EQ(240u, streams[0].width);
+ EXPECT_EQ(135u, streams[0].height);
+ EXPECT_EQ(streams[0].max_bitrate_bps, 200000);
+ EXPECT_EQ(streams[0].target_bitrate_bps, 150000);
+ EXPECT_EQ(streams[0].min_bitrate_bps, 30000);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/config/streams_config_gn/moz.build b/third_party/libwebrtc/video/config/streams_config_gn/moz.build
new file mode 100644
index 0000000000..9e174cff3c
--- /dev/null
+++ b/third_party/libwebrtc/video/config/streams_config_gn/moz.build
@@ -0,0 +1,234 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/config/encoder_stream_factory.cc",
+ "/third_party/libwebrtc/video/config/simulcast.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("streams_config_gn")
diff --git a/third_party/libwebrtc/video/config/video_encoder_config.cc b/third_party/libwebrtc/video/config/video_encoder_config.cc
new file mode 100644
index 0000000000..6ea2052138
--- /dev/null
+++ b/third_party/libwebrtc/video/config/video_encoder_config.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/config/video_encoder_config.h"
+
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+VideoStream::VideoStream()
+ : width(0),
+ height(0),
+ max_framerate(-1),
+ min_bitrate_bps(-1),
+ target_bitrate_bps(-1),
+ max_bitrate_bps(-1),
+ scale_resolution_down_by(-1.),
+ max_qp(-1),
+ num_temporal_layers(absl::nullopt),
+ active(true) {}
+VideoStream::VideoStream(const VideoStream& other) = default;
+
+VideoStream::~VideoStream() = default;
+
+std::string VideoStream::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{width: " << width;
+ ss << ", height: " << height;
+ ss << ", max_framerate: " << max_framerate;
+ ss << ", min_bitrate_bps:" << min_bitrate_bps;
+ ss << ", target_bitrate_bps:" << target_bitrate_bps;
+ ss << ", max_bitrate_bps:" << max_bitrate_bps;
+ ss << ", max_qp: " << max_qp;
+ ss << ", num_temporal_layers: " << num_temporal_layers.value_or(1);
+ ss << ", bitrate_priority: " << bitrate_priority.value_or(0);
+ ss << ", active: " << active;
+ ss << ", scale_down_by: " << scale_resolution_down_by;
+
+ return ss.str();
+}
+
+VideoEncoderConfig::VideoEncoderConfig()
+ : codec_type(kVideoCodecGeneric),
+ video_format("Unset"),
+ content_type(ContentType::kRealtimeVideo),
+ frame_drop_enabled(false),
+ encoder_specific_settings(nullptr),
+ min_transmit_bitrate_bps(0),
+ max_bitrate_bps(0),
+ bitrate_priority(1.0),
+ number_of_streams(0),
+ legacy_conference_mode(false),
+ is_quality_scaling_allowed(false) {}
+
+VideoEncoderConfig::VideoEncoderConfig(VideoEncoderConfig&&) = default;
+
+VideoEncoderConfig::~VideoEncoderConfig() = default;
+
+std::string VideoEncoderConfig::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{codec_type: ";
+ ss << CodecTypeToPayloadString(codec_type);
+ ss << ", content_type: ";
+ switch (content_type) {
+ case ContentType::kRealtimeVideo:
+ ss << "kRealtimeVideo";
+ break;
+ case ContentType::kScreen:
+ ss << "kScreenshare";
+ break;
+ }
+ ss << ", frame_drop_enabled: " << frame_drop_enabled;
+ ss << ", encoder_specific_settings: ";
+ ss << (encoder_specific_settings != nullptr ? "(ptr)" : "NULL");
+
+ ss << ", min_transmit_bitrate_bps: " << min_transmit_bitrate_bps;
+ ss << '}';
+ return ss.str();
+}
+
+VideoEncoderConfig::VideoEncoderConfig(const VideoEncoderConfig&) = default;
+
+void VideoEncoderConfig::EncoderSpecificSettings::FillEncoderSpecificSettings(
+ VideoCodec* codec) const {
+ if (codec->codecType == kVideoCodecVP8) {
+ FillVideoCodecVp8(codec->VP8());
+ } else if (codec->codecType == kVideoCodecVP9) {
+ FillVideoCodecVp9(codec->VP9());
+ } else {
+ RTC_DCHECK_NOTREACHED()
+ << "Encoder specifics set/used for unknown codec type.";
+ }
+}
+
+void VideoEncoderConfig::EncoderSpecificSettings::FillVideoCodecVp8(
+ VideoCodecVP8* vp8_settings) const {
+ RTC_DCHECK_NOTREACHED();
+}
+
+void VideoEncoderConfig::EncoderSpecificSettings::FillVideoCodecVp9(
+ VideoCodecVP9* vp9_settings) const {
+ RTC_DCHECK_NOTREACHED();
+}
+
+VideoEncoderConfig::Vp8EncoderSpecificSettings::Vp8EncoderSpecificSettings(
+ const VideoCodecVP8& specifics)
+ : specifics_(specifics) {}
+
+void VideoEncoderConfig::Vp8EncoderSpecificSettings::FillVideoCodecVp8(
+ VideoCodecVP8* vp8_settings) const {
+ *vp8_settings = specifics_;
+}
+
+VideoEncoderConfig::Vp9EncoderSpecificSettings::Vp9EncoderSpecificSettings(
+ const VideoCodecVP9& specifics)
+ : specifics_(specifics) {}
+
+void VideoEncoderConfig::Vp9EncoderSpecificSettings::FillVideoCodecVp9(
+ VideoCodecVP9* vp9_settings) const {
+ *vp9_settings = specifics_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/config/video_encoder_config.h b/third_party/libwebrtc/video/config/video_encoder_config.h
new file mode 100644
index 0000000000..5a79d58cbf
--- /dev/null
+++ b/third_party/libwebrtc/video/config/video_encoder_config.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_CONFIG_VIDEO_ENCODER_CONFIG_H_
+#define VIDEO_CONFIG_VIDEO_ENCODER_CONFIG_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/resolution.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/ref_count.h"
+
+namespace webrtc {
+
+// The `VideoStream` struct describes a simulcast layer, or "stream".
+struct VideoStream {
+ VideoStream();
+ ~VideoStream();
+ VideoStream(const VideoStream& other);
+ std::string ToString() const;
+
+ // Width/Height in pixels.
+  // This is the actual width and height used to configure the encoder,
+ // which might be less than `requested_resolution` due to adaptation
+ // or due to the source providing smaller frames than requested.
+ size_t width;
+ size_t height;
+
+ // Frame rate in fps.
+ int max_framerate;
+
+ // Bitrate, in bps, for the stream.
+ int min_bitrate_bps;
+ int target_bitrate_bps;
+ int max_bitrate_bps;
+
+ // Scaling factor applied to the stream size.
+ // `width` and `height` values are already scaled down.
+ double scale_resolution_down_by;
+
+ // Maximum Quantization Parameter to use when encoding the stream.
+ int max_qp;
+
+ // Determines the number of temporal layers that the stream should be
+ // encoded with. This value should be greater than zero.
+ // TODO(brandtr): This class is used both for configuring the encoder
+ // (meaning that this field _must_ be set), and for signaling the app-level
+  // encoder settings (meaning that the field _may_ be set). We should
+  // separate these two use cases and remove this optional.
+ absl::optional<size_t> num_temporal_layers;
+
+ // The priority of this stream, to be used when allocating resources
+ // between multiple streams.
+ absl::optional<double> bitrate_priority;
+
+ absl::optional<ScalabilityMode> scalability_mode;
+
+ // If this stream is enabled by the user, or not.
+ bool active;
+
+  // An optional user-supplied max_frame_resolution
+  // that can be set independently of the (adapted) VideoSource.
+ // This value is set from RtpEncodingParameters::requested_resolution
+ // (i.e. used for signaling app-level settings).
+ //
+ // The actual encode resolution is in `width` and `height`,
+ // which can be lower than requested_resolution,
+ // e.g. if source only provides lower resolution or
+ // if resource adaptation is active.
+ absl::optional<Resolution> requested_resolution;
+};
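+
+// A populated example (sketch; the bitrates mirror the 640x360 simulcast
+// defaults used in tests and are not mandated by this struct):
+//
+//   webrtc::VideoStream stream;
+//   stream.width = 640;
+//   stream.height = 360;
+//   stream.max_framerate = 30;
+//   stream.min_bitrate_bps = 150000;
+//   stream.target_bitrate_bps = 500000;
+//   stream.max_bitrate_bps = 700000;
+//   stream.max_qp = 55;
+//   stream.num_temporal_layers = 2;
+//   stream.active = true;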
+
+class VideoEncoderConfig {
+ public:
+  // These are reference counted to permit copying VideoEncoderConfig and to
+  // keep them alive until all encoder_specific_settings references go out of
+  // scope.
+ // TODO(kthelgason): Consider removing the need for copying VideoEncoderConfig
+ // and use absl::optional for encoder_specific_settings instead.
+ class EncoderSpecificSettings : public rtc::RefCountInterface {
+ public:
+ // TODO(pbos): Remove FillEncoderSpecificSettings as soon as VideoCodec is
+ // not in use and encoder implementations ask for codec-specific structs
+ // directly.
+ void FillEncoderSpecificSettings(VideoCodec* codec_struct) const;
+
+ virtual void FillVideoCodecVp8(VideoCodecVP8* vp8_settings) const;
+ virtual void FillVideoCodecVp9(VideoCodecVP9* vp9_settings) const;
+
+ private:
+ ~EncoderSpecificSettings() override {}
+ friend class VideoEncoderConfig;
+ };
+
+ class Vp8EncoderSpecificSettings : public EncoderSpecificSettings {
+ public:
+ explicit Vp8EncoderSpecificSettings(const VideoCodecVP8& specifics);
+ void FillVideoCodecVp8(VideoCodecVP8* vp8_settings) const override;
+
+ private:
+ VideoCodecVP8 specifics_;
+ };
+
+ class Vp9EncoderSpecificSettings : public EncoderSpecificSettings {
+ public:
+ explicit Vp9EncoderSpecificSettings(const VideoCodecVP9& specifics);
+ void FillVideoCodecVp9(VideoCodecVP9* vp9_settings) const override;
+
+ private:
+ VideoCodecVP9 specifics_;
+ };
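+
+  // Usage sketch (assuming rtc::make_ref_counted and
+  // VideoEncoder::GetDefaultVp8Settings() are available in this build):
+  //
+  //   VideoEncoderConfig config;
+  //   config.encoder_specific_settings =
+  //       rtc::make_ref_counted<Vp8EncoderSpecificSettings>(
+  //           VideoEncoder::GetDefaultVp8Settings());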
+
+ enum class ContentType {
+ kRealtimeVideo,
+ kScreen,
+ };
+
+ class VideoStreamFactoryInterface : public rtc::RefCountInterface {
+ public:
+ // An implementation should return a std::vector<VideoStream> with the
+ // wanted VideoStream settings for the given video resolution.
+ // The size of the vector may not be larger than
+ // `encoder_config.number_of_streams`.
+ virtual std::vector<VideoStream> CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const VideoEncoderConfig& encoder_config) = 0;
+
+ protected:
+ ~VideoStreamFactoryInterface() override {}
+ };
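+
+  // Implementation sketch (hypothetical; real factories such as
+  // EncoderStreamFactory derive the full simulcast layout instead):
+  //
+  //   class SingleStreamFactory : public VideoStreamFactoryInterface {
+  //     std::vector<VideoStream> CreateEncoderStreams(
+  //         int frame_width, int frame_height,
+  //         const VideoEncoderConfig& encoder_config) override {
+  //       VideoStream stream;
+  //       stream.width = frame_width;
+  //       stream.height = frame_height;
+  //       return {stream};
+  //     }
+  //   };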
+
+ VideoEncoderConfig& operator=(VideoEncoderConfig&&) = default;
+ VideoEncoderConfig& operator=(const VideoEncoderConfig&) = delete;
+
+ // Mostly used by tests. Avoid creating copies if you can.
+ VideoEncoderConfig Copy() const { return VideoEncoderConfig(*this); }
+
+ VideoEncoderConfig();
+ VideoEncoderConfig(VideoEncoderConfig&&);
+ ~VideoEncoderConfig();
+ std::string ToString() const;
+
+ // TODO(bugs.webrtc.org/6883): Consolidate on one of these.
+ VideoCodecType codec_type;
+ SdpVideoFormat video_format;
+
+ // Note: This factory can be unset, and VideoStreamEncoder will
+ // then use the EncoderStreamFactory. The factory is only set by
+ // tests.
+ rtc::scoped_refptr<VideoStreamFactoryInterface> video_stream_factory;
+ std::vector<SpatialLayer> spatial_layers;
+ ContentType content_type;
+ bool frame_drop_enabled;
+ rtc::scoped_refptr<const EncoderSpecificSettings> encoder_specific_settings;
+
+ // Padding will be used up to this bitrate regardless of the bitrate produced
+  // by the encoder. Padding above what's actually produced by the encoder
+  // helps maintain a higher bitrate estimate. Padding will, however, not be
+  // sent unless the estimated bandwidth indicates that the link can handle it.
+ int min_transmit_bitrate_bps;
+ int max_bitrate_bps;
+ // The bitrate priority used for all VideoStreams.
+ double bitrate_priority;
+
+  // The simulcast layers' configurations set by the application for this video
+  // sender. These are modified by the video_stream_factory before being passed
+  // down to the lower layers for video encoding.
+ // `simulcast_layers` is also used for configuring non-simulcast (when there
+ // is a single VideoStream).
+ std::vector<VideoStream> simulcast_layers;
+
+ // Max number of encoded VideoStreams to produce.
+ size_t number_of_streams;
+
+  // Legacy Google conference mode flag for simulcast screenshare.
+ bool legacy_conference_mode;
+
+ // Indicates whether quality scaling can be used or not.
+ bool is_quality_scaling_allowed;
+
+ // Maximum Quantization Parameter.
+  // This value is fed into the EncoderStreamFactory, which applies it to all
+  // simulcast and spatial layers.
+ int max_qp;
+
+ private:
+ // Access to the copy constructor is private to force use of the Copy()
+ // method for those exceptional cases where we do use it.
+ VideoEncoderConfig(const VideoEncoderConfig&);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_CONFIG_VIDEO_ENCODER_CONFIG_H_
diff --git a/third_party/libwebrtc/video/cpu_scaling_tests.cc b/third_party/libwebrtc/video/cpu_scaling_tests.cc
new file mode 100644
index 0000000000..b9f3a45e94
--- /dev/null
+++ b/third_party/libwebrtc/video/cpu_scaling_tests.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+#include <vector>
+
+#include "api/rtp_parameters.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/frame_generator_capturer.h"
+#include "test/gtest.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+namespace {
+constexpr int kWidth = 1280;
+constexpr int kHeight = 720;
+constexpr int kFps = 28;
+} // namespace
+
+// Minimal normal usage at start, then 60s overuse.
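+// The field trial below is assumed to be parsed by the overuse detector as
+// "normal-overuse-underuse" period lengths in milliseconds, so
+// "1-60000-60000" means 1 ms of normal load followed by 60 s of simulated
+// overuse.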
+class CpuOveruseTest : public test::CallTest {
+ protected:
+ CpuOveruseTest()
+ : field_trials_("WebRTC-ForceSimulatedOveruseIntervalMs/1-60000-60000/") {
+ }
+
+ void RunTestAndCheckForAdaptation(
+ const DegradationPreference& degradation_preference,
+ bool expect_adaptation);
+
+ test::ScopedFieldTrials field_trials_;
+};
+
+void CpuOveruseTest::RunTestAndCheckForAdaptation(
+ const DegradationPreference& degradation_preference,
+ bool expect_adaptation) {
+ class OveruseObserver
+ : public test::SendTest,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ OveruseObserver(const DegradationPreference& degradation_preference,
+ bool expect_adaptation)
+ : SendTest(expect_adaptation ? kLongTimeout : kDefaultTimeout),
+ degradation_preference_(degradation_preference),
+ expect_adaptation_(expect_adaptation) {}
+
+ private:
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetSinkWantsObserver(this);
+ // Set initial resolution.
+ frame_generator_capturer->ChangeResolution(kWidth, kHeight);
+ }
+
+ // Called when FrameGeneratorCapturer::AddOrUpdateSink is called.
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ if (wants.max_pixel_count == std::numeric_limits<int>::max() &&
+ wants.max_framerate_fps == kFps) {
+ // Max configured framerate is initially set.
+ return;
+ }
+ switch (degradation_preference_) {
+ case DegradationPreference::MAINTAIN_FRAMERATE:
+ EXPECT_LT(wants.max_pixel_count, kWidth * kHeight);
+ observation_complete_.Set();
+ break;
+ case DegradationPreference::MAINTAIN_RESOLUTION:
+ EXPECT_LT(wants.max_framerate_fps, kFps);
+ observation_complete_.Set();
+ break;
+ case DegradationPreference::BALANCED:
+ if (wants.max_pixel_count == std::numeric_limits<int>::max() &&
+ wants.max_framerate_fps == std::numeric_limits<int>::max()) {
+ // `adapt_counters_` map in VideoStreamEncoder is reset when
+ // balanced mode is set.
+ break;
+ }
+ EXPECT_TRUE(wants.max_pixel_count < kWidth * kHeight ||
+ wants.max_framerate_fps < kFps);
+ observation_complete_.Set();
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ EXPECT_FALSE(encoder_config->simulcast_layers.empty());
+ encoder_config->simulcast_layers[0].max_framerate = kFps;
+ }
+
+ void ModifyVideoDegradationPreference(
+ DegradationPreference* degradation_preference) override {
+ *degradation_preference = degradation_preference_;
+ }
+
+ void PerformTest() override {
+ EXPECT_EQ(expect_adaptation_, Wait())
+ << "Timed out while waiting for a scale down.";
+ }
+
+ const DegradationPreference degradation_preference_;
+ const bool expect_adaptation_;
+ } test(degradation_preference, expect_adaptation);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(CpuOveruseTest, AdaptsDownInResolutionOnOveruse) {
+ RunTestAndCheckForAdaptation(DegradationPreference::MAINTAIN_FRAMERATE, true);
+}
+
+TEST_F(CpuOveruseTest, AdaptsDownInFpsOnOveruse) {
+ RunTestAndCheckForAdaptation(DegradationPreference::MAINTAIN_RESOLUTION,
+ true);
+}
+
+TEST_F(CpuOveruseTest, AdaptsDownInResolutionOrFpsOnOveruse) {
+ RunTestAndCheckForAdaptation(DegradationPreference::BALANCED, true);
+}
+
+TEST_F(CpuOveruseTest, NoAdaptDownOnOveruse) {
+ RunTestAndCheckForAdaptation(DegradationPreference::DISABLED, false);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/decode_synchronizer.cc b/third_party/libwebrtc/video/decode_synchronizer.cc
new file mode 100644
index 0000000000..7d4da3d47a
--- /dev/null
+++ b/third_party/libwebrtc/video/decode_synchronizer.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/decode_synchronizer.h"
+
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "video/frame_decode_scheduler.h"
+#include "video/frame_decode_timing.h"
+
+namespace webrtc {
+
+DecodeSynchronizer::ScheduledFrame::ScheduledFrame(
+ uint32_t rtp_timestamp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameDecodeScheduler::FrameReleaseCallback callback)
+ : rtp_timestamp_(rtp_timestamp),
+ schedule_(std::move(schedule)),
+ callback_(std::move(callback)) {}
+
+void DecodeSynchronizer::ScheduledFrame::RunFrameReleaseCallback() && {
+  // Inspired by Chromium's base::OnceCallback: move `*this` into a local
+  // before running the callback so that the internal state is cleared once
+  // the callback has executed.
+ auto sf = std::move(*this);
+ std::move(sf.callback_)(sf.rtp_timestamp_, sf.schedule_.render_time);
+}
+
+Timestamp DecodeSynchronizer::ScheduledFrame::LatestDecodeTime() const {
+ return schedule_.latest_decode_time;
+}
+
+DecodeSynchronizer::SynchronizedFrameDecodeScheduler::
+ SynchronizedFrameDecodeScheduler(DecodeSynchronizer* sync)
+ : sync_(sync) {
+ RTC_DCHECK(sync_);
+}
+
+DecodeSynchronizer::SynchronizedFrameDecodeScheduler::
+ ~SynchronizedFrameDecodeScheduler() {
+ RTC_DCHECK(!next_frame_);
+ RTC_DCHECK(stopped_);
+}
+
+absl::optional<uint32_t>
+DecodeSynchronizer::SynchronizedFrameDecodeScheduler::ScheduledRtpTimestamp() {
+ return next_frame_.has_value()
+ ? absl::make_optional(next_frame_->rtp_timestamp())
+ : absl::nullopt;
+}
+
+DecodeSynchronizer::ScheduledFrame
+DecodeSynchronizer::SynchronizedFrameDecodeScheduler::ReleaseNextFrame() {
+ RTC_DCHECK(next_frame_);
+ auto res = std::move(*next_frame_);
+ next_frame_.reset();
+ return res;
+}
+
+Timestamp
+DecodeSynchronizer::SynchronizedFrameDecodeScheduler::LatestDecodeTime() {
+ RTC_DCHECK(next_frame_);
+ return next_frame_->LatestDecodeTime();
+}
+
+void DecodeSynchronizer::SynchronizedFrameDecodeScheduler::ScheduleFrame(
+ uint32_t rtp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameReleaseCallback cb) {
+  RTC_DCHECK(!next_frame_) << "Cannot schedule two frames at once.";
+ next_frame_ = ScheduledFrame(rtp, std::move(schedule), std::move(cb));
+ sync_->OnFrameScheduled(this);
+}
+
+void DecodeSynchronizer::SynchronizedFrameDecodeScheduler::CancelOutstanding() {
+ next_frame_.reset();
+}
+
+void DecodeSynchronizer::SynchronizedFrameDecodeScheduler::Stop() {
+ CancelOutstanding();
+ stopped_ = true;
+ sync_->RemoveFrameScheduler(this);
+}
+
+DecodeSynchronizer::DecodeSynchronizer(Clock* clock,
+ Metronome* metronome,
+ TaskQueueBase* worker_queue)
+ : clock_(clock), worker_queue_(worker_queue), metronome_(metronome) {
+ RTC_DCHECK(metronome_);
+ RTC_DCHECK(worker_queue_);
+}
+
+DecodeSynchronizer::~DecodeSynchronizer() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(schedulers_.empty());
+}
+
+std::unique_ptr<FrameDecodeScheduler>
+DecodeSynchronizer::CreateSynchronizedFrameScheduler() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ auto scheduler = std::make_unique<SynchronizedFrameDecodeScheduler>(this);
+ auto [it, inserted] = schedulers_.emplace(scheduler.get());
+ // If this is the first `scheduler` added, start listening to the metronome.
+ if (inserted && schedulers_.size() == 1) {
+ RTC_DLOG(LS_VERBOSE) << "Listening to metronome";
+ ScheduleNextTick();
+ }
+
+ return std::move(scheduler);
+}
+
+void DecodeSynchronizer::OnFrameScheduled(
+ SynchronizedFrameDecodeScheduler* scheduler) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(scheduler->ScheduledRtpTimestamp());
+
+ Timestamp now = clock_->CurrentTime();
+ Timestamp next_tick = expected_next_tick_;
+  // If no tick has been observed yet, assume the next one occurs within one
+  // tick period.
+ if (next_tick.IsInfinite()) {
+ next_tick = now + metronome_->TickPeriod();
+ }
+
+ // Release the frame right away if the decode time is too soon. Otherwise
+ // the stream may fall behind too much.
+ bool decode_before_next_tick =
+ scheduler->LatestDecodeTime() <
+ (next_tick - FrameDecodeTiming::kMaxAllowedFrameDelay);
+ // Decode immediately if the decode time is in the past.
+ bool decode_time_in_past = scheduler->LatestDecodeTime() < now;
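+  // Illustrative numbers: with a 20 ms tick period, a frame whose latest
+  // decode time falls more than kMaxAllowedFrameDelay before the next tick
+  // would otherwise have to wait a full tick, so it is released immediately.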
+
+ if (decode_before_next_tick || decode_time_in_past) {
+ ScheduledFrame scheduled_frame = scheduler->ReleaseNextFrame();
+ std::move(scheduled_frame).RunFrameReleaseCallback();
+ }
+}
+
+void DecodeSynchronizer::RemoveFrameScheduler(
+ SynchronizedFrameDecodeScheduler* scheduler) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(scheduler);
+ auto it = schedulers_.find(scheduler);
+ if (it == schedulers_.end()) {
+ return;
+ }
+ schedulers_.erase(it);
+  // With no schedulers left, invalidate the expected tick; OnTick() stops
+  // rescheduling itself once `schedulers_` is empty.
+ if (schedulers_.empty()) {
+ expected_next_tick_ = Timestamp::PlusInfinity();
+ }
+}
+
+void DecodeSynchronizer::ScheduleNextTick() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ metronome_->RequestCallOnNextTick(
+ SafeTask(safety_.flag(), [this] { OnTick(); }));
+}
+
+void DecodeSynchronizer::OnTick() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ expected_next_tick_ = clock_->CurrentTime() + metronome_->TickPeriod();
+
+ for (auto* scheduler : schedulers_) {
+ if (scheduler->ScheduledRtpTimestamp() &&
+ scheduler->LatestDecodeTime() < expected_next_tick_) {
+ auto scheduled_frame = scheduler->ReleaseNextFrame();
+ std::move(scheduled_frame).RunFrameReleaseCallback();
+ }
+ }
+
+ if (!schedulers_.empty())
+ ScheduleNextTick();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/decode_synchronizer.h b/third_party/libwebrtc/video/decode_synchronizer.h
new file mode 100644
index 0000000000..c6f8efdb29
--- /dev/null
+++ b/third_party/libwebrtc/video/decode_synchronizer.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_DECODE_SYNCHRONIZER_H_
+#define VIDEO_DECODE_SYNCHRONIZER_H_
+
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/metronome/metronome.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/frame_decode_scheduler.h"
+#include "video/frame_decode_timing.h"
+
+namespace webrtc {
+
+// DecodeSynchronizer synchronizes frame scheduling across receive streams by
+// coalescing decoding on the metronome.
+//
+// A video receive stream can use the DecodeSynchronizer by obtaining a
+// FrameDecodeScheduler instance from `CreateSynchronizedFrameScheduler()`.
+// This instance implements FrameDecodeScheduler and can be used as a normal
+// scheduler. This instance is owned by the receive stream, and is borrowed by
+// the DecodeSynchronizer. The DecodeSynchronizer will stop borrowing the
+// instance when `FrameDecodeScheduler::Stop()` is called, after which the
+// scheduler may be destroyed by the receive stream.
+//
+// When a frame is scheduled for decode by a receive stream using the
+// DecodeSynchronizer, the decode will instead be executed on the metronome
+// tick whose interval contains the frame's `latest_decode_time`. For example,
+// if a frame is scheduled for decode in 50ms and the tick interval is 20ms,
+// then the frame will be released for decoding after 2 ticks.
+//
+// In the case where the decode time is in the past, or must occur before the
+// next metronome tick, the frame will be released right away, allowing a
+// delayed stream to catch up quickly.
+//
+// DecodeSynchronizer is single threaded - all method calls must run on the
+// `worker_queue_`.
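+//
+// Usage sketch (the surrounding objects are illustrative):
+//
+//   DecodeSynchronizer sync(clock, metronome, worker_queue);
+//   std::unique_ptr<FrameDecodeScheduler> scheduler =
+//       sync.CreateSynchronizedFrameScheduler();
+//   scheduler->ScheduleFrame(rtp_timestamp, schedule, release_callback);
+//   ...
+//   scheduler->Stop();  // Required before the scheduler is destroyed.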
+class DecodeSynchronizer {
+ public:
+ DecodeSynchronizer(Clock* clock,
+ Metronome* metronome,
+ TaskQueueBase* worker_queue);
+ ~DecodeSynchronizer();
+ DecodeSynchronizer(const DecodeSynchronizer&) = delete;
+ DecodeSynchronizer& operator=(const DecodeSynchronizer&) = delete;
+
+ std::unique_ptr<FrameDecodeScheduler> CreateSynchronizedFrameScheduler();
+
+ private:
+ class ScheduledFrame {
+ public:
+ ScheduledFrame(uint32_t rtp_timestamp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameDecodeScheduler::FrameReleaseCallback callback);
+
+    // Disallow copy since `callback_` should only be moved.
+ ScheduledFrame(const ScheduledFrame&) = delete;
+ ScheduledFrame& operator=(const ScheduledFrame&) = delete;
+ ScheduledFrame(ScheduledFrame&&) = default;
+ ScheduledFrame& operator=(ScheduledFrame&&) = default;
+
+ // Executes `callback_`.
+ void RunFrameReleaseCallback() &&;
+
+ uint32_t rtp_timestamp() const { return rtp_timestamp_; }
+ Timestamp LatestDecodeTime() const;
+
+ private:
+ uint32_t rtp_timestamp_;
+ FrameDecodeTiming::FrameSchedule schedule_;
+ FrameDecodeScheduler::FrameReleaseCallback callback_;
+ };
+
+ class SynchronizedFrameDecodeScheduler : public FrameDecodeScheduler {
+ public:
+ explicit SynchronizedFrameDecodeScheduler(DecodeSynchronizer* sync);
+ ~SynchronizedFrameDecodeScheduler() override;
+
+ // Releases the outstanding frame for decoding. This invalidates
+ // `next_frame_`. There must be a frame scheduled.
+ ScheduledFrame ReleaseNextFrame();
+
+    // Returns `next_frame_.schedule.latest_decode_time`. There must be a
+    // frame scheduled when this is called.
+ Timestamp LatestDecodeTime();
+
+ // FrameDecodeScheduler implementation.
+ absl::optional<uint32_t> ScheduledRtpTimestamp() override;
+ void ScheduleFrame(uint32_t rtp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameReleaseCallback cb) override;
+ void CancelOutstanding() override;
+ void Stop() override;
+
+ private:
+ DecodeSynchronizer* sync_;
+ absl::optional<ScheduledFrame> next_frame_;
+ bool stopped_ = false;
+ };
+
+ void OnFrameScheduled(SynchronizedFrameDecodeScheduler* scheduler);
+ void RemoveFrameScheduler(SynchronizedFrameDecodeScheduler* scheduler);
+
+ void ScheduleNextTick();
+ void OnTick();
+
+ Clock* const clock_;
+ TaskQueueBase* const worker_queue_;
+ Metronome* const metronome_;
+
+ Timestamp expected_next_tick_ = Timestamp::PlusInfinity();
+ std::set<SynchronizedFrameDecodeScheduler*> schedulers_
+ RTC_GUARDED_BY(worker_queue_);
+ ScopedTaskSafetyDetached safety_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_DECODE_SYNCHRONIZER_H_
diff --git a/third_party/libwebrtc/video/decode_synchronizer_gn/moz.build b/third_party/libwebrtc/video/decode_synchronizer_gn/moz.build
new file mode 100644
index 0000000000..de01a944ea
--- /dev/null
+++ b/third_party/libwebrtc/video/decode_synchronizer_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/decode_synchronizer.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("decode_synchronizer_gn")
diff --git a/third_party/libwebrtc/video/decode_synchronizer_unittest.cc b/third_party/libwebrtc/video/decode_synchronizer_unittest.cc
new file mode 100644
index 0000000000..7a0d833812
--- /dev/null
+++ b/third_party/libwebrtc/video/decode_synchronizer_unittest.cc
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/decode_synchronizer.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "absl/functional/any_invocable.h"
+#include "api/metronome/test/fake_metronome.h"
+#include "api/units/time_delta.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "video/frame_decode_scheduler.h"
+#include "video/frame_decode_timing.h"
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace webrtc {
+
+class MockMetronome : public Metronome {
+ public:
+ MOCK_METHOD(void,
+ RequestCallOnNextTick,
+ (absl::AnyInvocable<void() &&> callback),
+ (override));
+  MOCK_METHOD(TimeDelta, TickPeriod, (), (const, override));
+};
+
+class DecodeSynchronizerTest : public ::testing::Test {
+ public:
+ static constexpr TimeDelta kTickPeriod = TimeDelta::Millis(33);
+
+ DecodeSynchronizerTest()
+ : time_controller_(Timestamp::Millis(1337)),
+ clock_(time_controller_.GetClock()),
+ metronome_(kTickPeriod),
+ decode_synchronizer_(clock_,
+ &metronome_,
+ time_controller_.GetMainThread()) {}
+
+ protected:
+ GlobalSimulatedTimeController time_controller_;
+ Clock* clock_;
+ test::ForcedTickMetronome metronome_;
+ DecodeSynchronizer decode_synchronizer_;
+};
+
+TEST_F(DecodeSynchronizerTest, AllFramesReadyBeforeNextTickDecoded) {
+ ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback1;
+ auto scheduler1 = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+  ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback2;
+ auto scheduler2 = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+ {
+ uint32_t frame_rtp = 90000;
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time =
+ clock_->CurrentTime() + kTickPeriod - TimeDelta::Millis(3),
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(60)};
+ scheduler1->ScheduleFrame(frame_rtp, frame_sched,
+ mock_callback1.AsStdFunction());
+ EXPECT_CALL(mock_callback1,
+ Call(Eq(frame_rtp), Eq(frame_sched.render_time)));
+ }
+ {
+ uint32_t frame_rtp = 123456;
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time =
+ clock_->CurrentTime() + kTickPeriod - TimeDelta::Millis(2),
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(70)};
+ scheduler2->ScheduleFrame(frame_rtp, frame_sched,
+ mock_callback2.AsStdFunction());
+ EXPECT_CALL(mock_callback2,
+ Call(Eq(frame_rtp), Eq(frame_sched.render_time)));
+ }
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Cleanup
+ scheduler1->Stop();
+ scheduler2->Stop();
+}
+
+TEST_F(DecodeSynchronizerTest, FramesNotDecodedIfDecodeTimeIsInNextInterval) {
+  ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback;
+ auto scheduler = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+ uint32_t frame_rtp = 90000;
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time =
+ clock_->CurrentTime() + kTickPeriod + TimeDelta::Millis(10),
+ .render_time =
+ clock_->CurrentTime() + kTickPeriod + TimeDelta::Millis(30)};
+ scheduler->ScheduleFrame(frame_rtp, frame_sched,
+ mock_callback.AsStdFunction());
+
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ // No decodes should have happened in this tick.
+ ::testing::Mock::VerifyAndClearExpectations(&mock_callback);
+
+ // Decode should happen on next tick.
+ EXPECT_CALL(mock_callback, Call(Eq(frame_rtp), Eq(frame_sched.render_time)));
+ time_controller_.AdvanceTime(kTickPeriod);
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Cleanup
+ scheduler->Stop();
+}
+
+TEST_F(DecodeSynchronizerTest, FrameDecodedOnce) {
+  ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback;
+ auto scheduler = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+ uint32_t frame_rtp = 90000;
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time = clock_->CurrentTime() + TimeDelta::Millis(30),
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(60)};
+ scheduler->ScheduleFrame(frame_rtp, frame_sched,
+ mock_callback.AsStdFunction());
+ EXPECT_CALL(mock_callback, Call(_, _)).Times(1);
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&mock_callback);
+
+ // Trigger tick again. No frame should be decoded now.
+ time_controller_.AdvanceTime(kTickPeriod);
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Cleanup
+ scheduler->Stop();
+}
+
+TEST_F(DecodeSynchronizerTest, FrameWithDecodeTimeInPastDecodedImmediately) {
+  ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback;
+ auto scheduler = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+ uint32_t frame_rtp = 90000;
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time = clock_->CurrentTime() - TimeDelta::Millis(5),
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(30)};
+ EXPECT_CALL(mock_callback, Call(Eq(90000u), _)).Times(1);
+ scheduler->ScheduleFrame(frame_rtp, frame_sched,
+ mock_callback.AsStdFunction());
+ // Verify the callback was invoked already.
+ ::testing::Mock::VerifyAndClearExpectations(&mock_callback);
+
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Cleanup
+ scheduler->Stop();
+}
+
+TEST_F(DecodeSynchronizerTest,
+ FrameWithDecodeTimeFarBeforeNextTickDecodedImmediately) {
+  ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback;
+ auto scheduler = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+  // A frame that would be behind by more than kMaxAllowedFrameDelay after
+  // the next tick should be released immediately.
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time = clock_->CurrentTime() + kTickPeriod -
+ FrameDecodeTiming::kMaxAllowedFrameDelay -
+ TimeDelta::Millis(1),
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(30)};
+ EXPECT_CALL(mock_callback, Call(Eq(90000u), _)).Times(1);
+ scheduler->ScheduleFrame(90000, frame_sched, mock_callback.AsStdFunction());
+ // Verify the callback was invoked already.
+ ::testing::Mock::VerifyAndClearExpectations(&mock_callback);
+
+ time_controller_.AdvanceTime(kTickPeriod);
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // A frame that would be behind by exactly kMaxAllowedFrameDelay after next
+ // tick should decode at the next tick.
+ FrameDecodeTiming::FrameSchedule queued_frame{
+ .latest_decode_time = clock_->CurrentTime() + kTickPeriod -
+ FrameDecodeTiming::kMaxAllowedFrameDelay,
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(30)};
+ scheduler->ScheduleFrame(180000, queued_frame, mock_callback.AsStdFunction());
+ // Verify the callback was invoked already.
+ ::testing::Mock::VerifyAndClearExpectations(&mock_callback);
+
+ EXPECT_CALL(mock_callback, Call(Eq(180000u), _)).Times(1);
+ time_controller_.AdvanceTime(kTickPeriod);
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Cleanup
+ scheduler->Stop();
+}
+
+TEST_F(DecodeSynchronizerTest, FramesNotReleasedAfterStop) {
+  ::testing::MockFunction<void(uint32_t, Timestamp)> mock_callback;
+ auto scheduler = decode_synchronizer_.CreateSynchronizedFrameScheduler();
+
+ uint32_t frame_rtp = 90000;
+ FrameDecodeTiming::FrameSchedule frame_sched{
+ .latest_decode_time = clock_->CurrentTime() + TimeDelta::Millis(30),
+ .render_time = clock_->CurrentTime() + TimeDelta::Millis(60)};
+ scheduler->ScheduleFrame(frame_rtp, frame_sched,
+ mock_callback.AsStdFunction());
+ // Cleanup
+ scheduler->Stop();
+
+ // No callback should occur on this tick since Stop() was called before.
+ metronome_.Tick();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST(DecodeSynchronizerStandaloneTest,
+ MetronomeNotListenedWhenNoStreamsAreActive) {
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(4711));
+ Clock* clock(time_controller.GetClock());
+ MockMetronome metronome;
+ ON_CALL(metronome, TickPeriod).WillByDefault(Return(TimeDelta::Seconds(1)));
+  DecodeSynchronizer decode_synchronizer(clock, &metronome,
+                                         time_controller.GetMainThread());
+ absl::AnyInvocable<void() &&> callback;
+ EXPECT_CALL(metronome, RequestCallOnNextTick)
+ .WillOnce(Invoke([&callback](absl::AnyInvocable<void() &&> cb) {
+ callback = std::move(cb);
+ }));
+  auto scheduler = decode_synchronizer.CreateSynchronizedFrameScheduler();
+  auto scheduler2 = decode_synchronizer.CreateSynchronizedFrameScheduler();
+ scheduler->Stop();
+ scheduler2->Stop();
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ ASSERT_TRUE(callback);
+  std::move(callback)();
+}
+
+} // namespace webrtc
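
Taken together, these tests pin down a three-way decision rule: frames due before the next tick go out on the tick, frames due later keep waiting, and frames that would already be more than FrameDecodeTiming::kMaxAllowedFrameDelay late by the next tick are released immediately. A hedged sketch of that rule; the 5 ms constant is an assumed stand-in, not taken from the real header:

#include <cstdint>

enum class ReleaseAction { kImmediately, kOnNextTick, kKeepQueued };

// Assumed stand-in for FrameDecodeTiming::kMaxAllowedFrameDelay.
constexpr int64_t kMaxAllowedFrameDelayMs = 5;

ReleaseAction Decide(int64_t latest_decode_ms, int64_t next_tick_ms) {
  // Hopelessly late frames skip the tick entirely so decoding can catch up.
  if (latest_decode_ms + kMaxAllowedFrameDelayMs < next_tick_ms) {
    return ReleaseAction::kImmediately;
  }
  // Frames due by the next tick go out on that tick.
  if (latest_decode_ms <= next_tick_ms) {
    return ReleaseAction::kOnNextTick;
  }
  // Anything due later stays queued for a future tick.
  return ReleaseAction::kKeepQueued;
}
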
diff --git a/third_party/libwebrtc/video/encoder_bitrate_adjuster.cc b/third_party/libwebrtc/video/encoder_bitrate_adjuster.cc
new file mode 100644
index 0000000000..8ed16a7565
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_bitrate_adjuster.cc
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_bitrate_adjuster.h"
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+namespace {
+// Helper struct with metadata for a single spatial layer.
+struct LayerRateInfo {
+ double link_utilization_factor = 0.0;
+ double media_utilization_factor = 0.0;
+ DataRate target_rate = DataRate::Zero();
+
+ DataRate WantedOvershoot() const {
+ // If there is headroom, allow bitrate to go up to media rate limit.
+ // Still limit media utilization to 1.0, so we don't overshoot over long
+ // runs even if we have headroom.
+ const double max_media_utilization =
+ std::max(1.0, media_utilization_factor);
+ if (link_utilization_factor > max_media_utilization) {
+ return (link_utilization_factor - max_media_utilization) * target_rate;
+ }
+ return DataRate::Zero();
+ }
+};
+} // namespace
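
A standalone restatement of WantedOvershoot() with concrete numbers, to make the clamping explicit (an illustrative sketch, not part of the patch):

#include <algorithm>
#include <cstdio>

double WantedOvershootKbps(double link_utilization, double media_utilization,
                           double target_kbps) {
  // Media utilization below 1.0 is clamped so that headroom never pushes the
  // long-run media rate above target.
  const double max_media_utilization = std::max(1.0, media_utilization);
  return link_utilization > max_media_utilization
             ? (link_utilization - max_media_utilization) * target_kbps
             : 0.0;
}

int main() {
  // Link overshoots 30%, media 10%: the layer wants 200 kbps of headroom.
  std::printf("%.0f\n", WantedOvershootKbps(1.3, 1.1, 1000.0));  // 200
  // Media at or below 1.0 clamps to 1.0: 300 kbps wanted.
  std::printf("%.0f\n", WantedOvershootKbps(1.3, 0.9, 1000.0));  // 300
  // No link overshoot beyond the media limit: nothing wanted.
  std::printf("%.0f\n", WantedOvershootKbps(1.05, 1.1, 1000.0));  // 0
  return 0;
}
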
+constexpr int64_t EncoderBitrateAdjuster::kWindowSizeMs;
+constexpr size_t EncoderBitrateAdjuster::kMinFramesSinceLayoutChange;
+constexpr double EncoderBitrateAdjuster::kDefaultUtilizationFactor;
+
+EncoderBitrateAdjuster::EncoderBitrateAdjuster(const VideoCodec& codec_settings)
+ : utilize_bandwidth_headroom_(RateControlSettings::ParseFromFieldTrials()
+ .BitrateAdjusterCanUseNetworkHeadroom()),
+ frames_since_layout_change_(0),
+ min_bitrates_bps_{} {
+ if (codec_settings.codecType == VideoCodecType::kVideoCodecVP9) {
+ for (size_t si = 0; si < codec_settings.VP9().numberOfSpatialLayers; ++si) {
+ if (codec_settings.spatialLayers[si].active) {
+ min_bitrates_bps_[si] =
+ std::max(codec_settings.minBitrate * 1000,
+ codec_settings.spatialLayers[si].minBitrate * 1000);
+ }
+ }
+ } else {
+ for (size_t si = 0; si < codec_settings.numberOfSimulcastStreams; ++si) {
+ if (codec_settings.simulcastStream[si].active) {
+ min_bitrates_bps_[si] =
+ std::max(codec_settings.minBitrate * 1000,
+ codec_settings.simulcastStream[si].minBitrate * 1000);
+ }
+ }
+ }
+}
+
+EncoderBitrateAdjuster::~EncoderBitrateAdjuster() = default;
+
+VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
+ const VideoEncoder::RateControlParameters& rates) {
+ current_rate_control_parameters_ = rates;
+
+ // First check that overshoot detectors exist, and store per spatial layer
+ // how many active temporal layers we have.
+ size_t active_tls_[kMaxSpatialLayers] = {};
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ active_tls_[si] = 0;
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ // Layer is enabled iff it has both positive bitrate and framerate target.
+ if (rates.bitrate.GetBitrate(si, ti) > 0 &&
+ current_fps_allocation_[si].size() > ti &&
+ current_fps_allocation_[si][ti] > 0) {
+ ++active_tls_[si];
+ if (!overshoot_detectors_[si][ti]) {
+ overshoot_detectors_[si][ti] =
+ std::make_unique<EncoderOvershootDetector>(kWindowSizeMs);
+ frames_since_layout_change_ = 0;
+ }
+ } else if (overshoot_detectors_[si][ti]) {
+ // Layer removed, destroy overshoot detector.
+ overshoot_detectors_[si][ti].reset();
+ frames_since_layout_change_ = 0;
+ }
+ }
+ }
+
+ // Next poll the overshoot detectors and populate the adjusted allocation.
+ const int64_t now_ms = rtc::TimeMillis();
+ VideoBitrateAllocation adjusted_allocation;
+ std::vector<LayerRateInfo> layer_infos;
+ DataRate wanted_overshoot_sum = DataRate::Zero();
+
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ layer_infos.emplace_back();
+ LayerRateInfo& layer_info = layer_infos.back();
+
+ layer_info.target_rate =
+ DataRate::BitsPerSec(rates.bitrate.GetSpatialLayerSum(si));
+
+ // Adjustment is done per spatial layer only (not per temporal layer).
+ if (frames_since_layout_change_ < kMinFramesSinceLayoutChange) {
+ layer_info.link_utilization_factor = kDefaultUtilizationFactor;
+ layer_info.media_utilization_factor = kDefaultUtilizationFactor;
+ } else if (active_tls_[si] == 0 ||
+ layer_info.target_rate == DataRate::Zero()) {
+      // No signaled temporal layers, or no bitrate set. This could be an
+      // unused spatial layer or a dynamic bitrate mode; pass the bitrate
+      // through without any change.
+ layer_info.link_utilization_factor = 1.0;
+ layer_info.media_utilization_factor = 1.0;
+ } else if (active_tls_[si] == 1) {
+      // A single active temporal layer: this might mean a single-layer
+      // stream, or an encoder that does not support temporal layers. Merge
+      // the target bitrates for this spatial layer.
+ RTC_DCHECK(overshoot_detectors_[si][0]);
+ layer_info.link_utilization_factor =
+ overshoot_detectors_[si][0]
+ ->GetNetworkRateUtilizationFactor(now_ms)
+ .value_or(kDefaultUtilizationFactor);
+ layer_info.media_utilization_factor =
+ overshoot_detectors_[si][0]
+ ->GetMediaRateUtilizationFactor(now_ms)
+ .value_or(kDefaultUtilizationFactor);
+ } else if (layer_info.target_rate > DataRate::Zero()) {
+ // Multiple temporal layers enabled for this spatial layer. Update rate
+ // for each of them and make a weighted average of utilization factors,
+ // with bitrate fraction used as weight.
+ // If any layer is missing a utilization factor, fall back to default.
+ layer_info.link_utilization_factor = 0.0;
+ layer_info.media_utilization_factor = 0.0;
+ for (size_t ti = 0; ti < active_tls_[si]; ++ti) {
+ RTC_DCHECK(overshoot_detectors_[si][ti]);
+ const absl::optional<double> ti_link_utilization_factor =
+ overshoot_detectors_[si][ti]->GetNetworkRateUtilizationFactor(
+ now_ms);
+ const absl::optional<double> ti_media_utilization_factor =
+ overshoot_detectors_[si][ti]->GetMediaRateUtilizationFactor(now_ms);
+ if (!ti_link_utilization_factor || !ti_media_utilization_factor) {
+ layer_info.link_utilization_factor = kDefaultUtilizationFactor;
+ layer_info.media_utilization_factor = kDefaultUtilizationFactor;
+ break;
+ }
+ const double weight =
+ static_cast<double>(rates.bitrate.GetBitrate(si, ti)) /
+ layer_info.target_rate.bps();
+ layer_info.link_utilization_factor +=
+ weight * ti_link_utilization_factor.value();
+ layer_info.media_utilization_factor +=
+ weight * ti_media_utilization_factor.value();
+ }
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ if (layer_info.link_utilization_factor < 1.0) {
+ // TODO(sprang): Consider checking underuse and allowing it to cancel some
+ // potential overuse by other streams.
+
+ // Don't boost target bitrate if encoder is under-using.
+ layer_info.link_utilization_factor = 1.0;
+ } else {
+      // Cap the utilization factor at 2.0 so the encoder target is never
+      // reduced below 50%; past that point the frame dropper should kick in
+      // instead.
+ layer_info.link_utilization_factor =
+ std::min(layer_info.link_utilization_factor, 2.0);
+
+ // Keep track of sum of desired overshoot bitrate.
+ wanted_overshoot_sum += layer_info.WantedOvershoot();
+ }
+ }
+
+ // Available link headroom that can be used to fill wanted overshoot.
+ DataRate available_headroom = DataRate::Zero();
+ if (utilize_bandwidth_headroom_) {
+ available_headroom = rates.bandwidth_allocation -
+ DataRate::BitsPerSec(rates.bitrate.get_sum_bps());
+ }
+
+ // All wanted overshoots are satisfied in the same proportion based on
+ // available headroom.
+ const double granted_overshoot_ratio =
+ wanted_overshoot_sum == DataRate::Zero()
+ ? 0.0
+ : std::min(1.0, available_headroom.bps<double>() /
+ wanted_overshoot_sum.bps());
+
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ LayerRateInfo& layer_info = layer_infos[si];
+ double utilization_factor = layer_info.link_utilization_factor;
+ DataRate allowed_overshoot =
+ granted_overshoot_ratio * layer_info.WantedOvershoot();
+ if (allowed_overshoot > DataRate::Zero()) {
+ // Pretend the target bitrate is higher by the allowed overshoot.
+ // Since utilization_factor = actual_bitrate / target_bitrate, it can be
+ // done by multiplying by old_target_bitrate / new_target_bitrate.
+ utilization_factor *= layer_info.target_rate.bps<double>() /
+ (allowed_overshoot.bps<double>() +
+ layer_info.target_rate.bps<double>());
+ }
+
+ if (min_bitrates_bps_[si] > 0 &&
+ layer_info.target_rate > DataRate::Zero() &&
+ DataRate::BitsPerSec(min_bitrates_bps_[si]) < layer_info.target_rate) {
+ // Make sure rate adjuster doesn't push target bitrate below minimum.
+ utilization_factor =
+ std::min(utilization_factor, layer_info.target_rate.bps<double>() /
+ min_bitrates_bps_[si]);
+ }
+
+ if (layer_info.target_rate > DataRate::Zero()) {
+ RTC_LOG(LS_VERBOSE) << "Utilization factors for spatial index " << si
+ << ": link = " << layer_info.link_utilization_factor
+ << ", media = " << layer_info.media_utilization_factor
+ << ", wanted overshoot = "
+ << layer_info.WantedOvershoot().bps()
+ << " bps, available headroom = "
+ << available_headroom.bps()
+ << " bps, total utilization factor = "
+ << utilization_factor;
+ }
+
+ // Populate the adjusted allocation with determined utilization factor.
+ if (active_tls_[si] == 1 &&
+ layer_info.target_rate >
+ DataRate::BitsPerSec(rates.bitrate.GetBitrate(si, 0))) {
+      // Bitrate allocation indicates temporal layer usage, but the encoder
+      // does not seem to support it. Pipe all bitrate into a single
+      // overshoot detector.
+ uint32_t adjusted_layer_bitrate_bps =
+ std::min(static_cast<uint32_t>(
+ layer_info.target_rate.bps() / utilization_factor + 0.5),
+ layer_info.target_rate.bps<uint32_t>());
+ adjusted_allocation.SetBitrate(si, 0, adjusted_layer_bitrate_bps);
+ } else {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (rates.bitrate.HasBitrate(si, ti)) {
+ uint32_t adjusted_layer_bitrate_bps = std::min(
+ static_cast<uint32_t>(
+ rates.bitrate.GetBitrate(si, ti) / utilization_factor + 0.5),
+ rates.bitrate.GetBitrate(si, ti));
+ adjusted_allocation.SetBitrate(si, ti, adjusted_layer_bitrate_bps);
+ }
+ }
+ }
+
+ // In case of rounding errors, add bitrate to TL0 until min bitrate
+ // constraint has been met.
+ const uint32_t adjusted_spatial_layer_sum =
+ adjusted_allocation.GetSpatialLayerSum(si);
+ if (layer_info.target_rate > DataRate::Zero() &&
+ adjusted_spatial_layer_sum < min_bitrates_bps_[si]) {
+ adjusted_allocation.SetBitrate(si, 0,
+ adjusted_allocation.GetBitrate(si, 0) +
+ min_bitrates_bps_[si] -
+ adjusted_spatial_layer_sum);
+ }
+
+ // Update all detectors with the new adjusted bitrate targets.
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ const uint32_t layer_bitrate_bps = adjusted_allocation.GetBitrate(si, ti);
+      // The overshoot detector may not exist, e.g. for the
+      // ScreenshareLayers case.
+ if (layer_bitrate_bps > 0 && overshoot_detectors_[si][ti]) {
+        // The fps allocation is cumulative, so subtract the next lower
+        // layer's share to get the frame rate of this layer alone.
+ const double fps_fraction =
+ static_cast<double>(
+ current_fps_allocation_[si][ti] -
+ (ti == 0 ? 0 : current_fps_allocation_[si][ti - 1])) /
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction;
+
+ if (fps_fraction <= 0.0) {
+ RTC_LOG(LS_WARNING)
+ << "Encoder config has temporal layer with non-zero bitrate "
+ "allocation but zero framerate allocation.";
+ continue;
+ }
+
+ overshoot_detectors_[si][ti]->SetTargetRate(
+ DataRate::BitsPerSec(layer_bitrate_bps),
+ fps_fraction * rates.framerate_fps, now_ms);
+ }
+ }
+ }
+
+  // Since the adjustment does not toggle any spatial layers or streams, the
+  // bw-limited flag stays the same.
+ adjusted_allocation.set_bw_limited(rates.bitrate.is_bw_limited());
+
+ return adjusted_allocation;
+}
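
The headroom split in AdjustRateAllocation() is proportional: every layer is granted the same fraction of the overshoot it asked for. A numeric sketch with made-up rates, showing how the granted ratio feeds back into the utilization factor:

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed rates: layers together want 400 kbps of overshoot but only
  // 100 kbps of link headroom is available, so 25% of each wish is granted.
  const double wanted_overshoot_sum_kbps = 400.0;
  const double available_headroom_kbps = 100.0;
  const double granted_ratio =
      std::min(1.0, available_headroom_kbps / wanted_overshoot_sum_kbps);

  // One layer: 1000 kbps target, link utilization 1.3, wanted 300 kbps.
  const double target_kbps = 1000.0;
  double utilization = 1.3;
  const double allowed_kbps = granted_ratio * 300.0;  // 75 kbps granted.
  // Pretend the target is higher by the allowed overshoot:
  // utilization = actual / target, so scale by target / (target + allowed).
  utilization *= target_kbps / (target_kbps + allowed_kbps);  // ~1.209
  std::printf("granted=%.2f utilization=%.3f adjusted=%.0f kbps\n",
              granted_ratio, utilization, target_kbps / utilization);  // ~827
  return 0;
}
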
+
+void EncoderBitrateAdjuster::OnEncoderInfo(
+ const VideoEncoder::EncoderInfo& encoder_info) {
+ // Copy allocation into current state and re-allocate.
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ current_fps_allocation_[si] = encoder_info.fps_allocation[si];
+ }
+
+ // Trigger re-allocation so that overshoot detectors have correct targets.
+ AdjustRateAllocation(current_rate_control_parameters_);
+}
+
+void EncoderBitrateAdjuster::OnEncodedFrame(DataSize size,
+ int spatial_index,
+ int temporal_index) {
+ ++frames_since_layout_change_;
+ // Detectors may not exist, for instance if ScreenshareLayers is used.
+ auto& detector = overshoot_detectors_[spatial_index][temporal_index];
+ if (detector) {
+ detector->OnEncodedFrame(size.bytes(), rtc::TimeMillis());
+ }
+}
+
+void EncoderBitrateAdjuster::Reset() {
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ overshoot_detectors_[si][ti].reset();
+ }
+ }
+  // Call AdjustRateAllocation() with the last known bitrate allocation, so
+  // that the appropriate overuse detectors are immediately re-created.
+ AdjustRateAllocation(current_rate_control_parameters_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/encoder_bitrate_adjuster.h b/third_party/libwebrtc/video/encoder_bitrate_adjuster.h
new file mode 100644
index 0000000000..74d0289ad0
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_bitrate_adjuster.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ENCODER_BITRATE_ADJUSTER_H_
+#define VIDEO_ENCODER_BITRATE_ADJUSTER_H_
+
+#include <memory>
+
+#include "api/video/encoded_image.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "video/encoder_overshoot_detector.h"
+
+namespace webrtc {
+
+class EncoderBitrateAdjuster {
+ public:
+ // Size of sliding window used to track overshoot rate.
+ static constexpr int64_t kWindowSizeMs = 3000;
+ // Minimum number of frames since last layout change required to trust the
+ // overshoot statistics. Otherwise falls back to default utilization.
+ // By layout change, we mean any spatial/temporal layer being either enabled
+ // or disabled.
+ static constexpr size_t kMinFramesSinceLayoutChange = 30;
+ // Default utilization, before reliable metrics are available, is set to 20%
+ // overshoot. This is conservative so that badly misbehaving encoders don't
+ // build too much queue at the very start.
+ static constexpr double kDefaultUtilizationFactor = 1.2;
+
+ explicit EncoderBitrateAdjuster(const VideoCodec& codec_settings);
+ ~EncoderBitrateAdjuster();
+
+ // Adjusts the given rate allocation to make it paceable within the target
+ // rates.
+ VideoBitrateAllocation AdjustRateAllocation(
+ const VideoEncoder::RateControlParameters& rates);
+
+  // Updates the overuse detectors with data about the encoder, specifically
+  // the temporal layer frame rate allocation.
+ void OnEncoderInfo(const VideoEncoder::EncoderInfo& encoder_info);
+
+ // Updates the overuse detectors according to the encoded image size.
+ void OnEncodedFrame(DataSize size, int spatial_index, int temporal_index);
+
+ void Reset();
+
+ private:
+ const bool utilize_bandwidth_headroom_;
+
+ VideoEncoder::RateControlParameters current_rate_control_parameters_;
+ // FPS allocation of temporal layers, per spatial layer. Represented as a Q8
+ // fraction; 0 = 0%, 255 = 100%. See VideoEncoder::EncoderInfo.fps_allocation.
+ absl::InlinedVector<uint8_t, kMaxTemporalStreams>
+ current_fps_allocation_[kMaxSpatialLayers];
+
+  // Frames since the layout was last changed, i.e. since any spatial or
+  // temporal layer was enabled or disabled.
+ size_t frames_since_layout_change_;
+ std::unique_ptr<EncoderOvershootDetector>
+ overshoot_detectors_[kMaxSpatialLayers][kMaxTemporalStreams];
+
+ // Minimum bitrates allowed, per spatial layer.
+ uint32_t min_bitrates_bps_[kMaxSpatialLayers];
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ENCODER_BITRATE_ADJUSTER_H_
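
The Q8 fps_allocation fractions mentioned above are cumulative per temporal layer. A short sketch of how they translate into per-layer frame rates, assuming the usual halving pattern for three temporal layers:

#include <cstdint>
#include <cstdio>

int main() {
  constexpr int kMaxFramerateFraction = 255;  // Q8: 255 == 100%.
  // Cumulative shares: TL0 alone ~25%, TL0+TL1 ~50%, all layers 100%.
  const uint8_t fps_allocation[3] = {64, 128, 255};
  const double target_fps = 30.0;
  for (int ti = 0; ti < 3; ++ti) {
    // The allocation is cumulative, so subtract the next lower layer's
    // share to get this layer's own frame rate.
    const int lower = (ti == 0) ? 0 : fps_allocation[ti - 1];
    const double layer_fps =
        target_fps * (fps_allocation[ti] - lower) / kMaxFramerateFraction;
    std::printf("TL%d: %.1f fps\n", ti, layer_fps);  // ~7.5, ~7.5, ~14.9
  }
  return 0;
}
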
diff --git a/third_party/libwebrtc/video/encoder_bitrate_adjuster_unittest.cc b/third_party/libwebrtc/video/encoder_bitrate_adjuster_unittest.cc
new file mode 100644
index 0000000000..4ec223a208
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_bitrate_adjuster_unittest.cc
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_bitrate_adjuster.h"
+
+#include <memory>
+#include <vector>
+
+#include "api/units/data_rate.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+class EncoderBitrateAdjusterTest : public ::testing::Test {
+ public:
+ static constexpr int64_t kWindowSizeMs = 3000;
+ static constexpr int kDefaultBitrateBps = 300000;
+ static constexpr int kDefaultFrameRateFps = 30;
+ // For network utilization higher than media utilization, loop over a
+ // sequence where the first half undershoots and the second half overshoots
+ // by the same amount.
+ static constexpr int kSequenceLength = 4;
+ static_assert(kSequenceLength % 2 == 0, "Sequence length must be even.");
+
+ EncoderBitrateAdjusterTest()
+ : target_bitrate_(DataRate::BitsPerSec(kDefaultBitrateBps)),
+ target_framerate_fps_(kDefaultFrameRateFps),
+ tl_pattern_idx_{},
+ sequence_idx_{} {}
+
+ protected:
+ void SetUpAdjuster(size_t num_spatial_layers,
+ size_t num_temporal_layers,
+ bool vp9_svc) {
+    // Initialize a default VideoCodec instance with the given number of
+    // layers.
+ if (vp9_svc) {
+ codec_.codecType = VideoCodecType::kVideoCodecVP9;
+ codec_.numberOfSimulcastStreams = 1;
+ codec_.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_.VP9()->numberOfTemporalLayers = num_temporal_layers;
+ for (size_t si = 0; si < num_spatial_layers; ++si) {
+ codec_.spatialLayers[si].minBitrate = 100 * (1 << si);
+ codec_.spatialLayers[si].targetBitrate = 200 * (1 << si);
+ codec_.spatialLayers[si].maxBitrate = 300 * (1 << si);
+ codec_.spatialLayers[si].active = true;
+ codec_.spatialLayers[si].numberOfTemporalLayers = num_temporal_layers;
+ }
+ } else {
+ codec_.codecType = VideoCodecType::kVideoCodecVP8;
+ codec_.numberOfSimulcastStreams = num_spatial_layers;
+ codec_.VP8()->numberOfTemporalLayers = num_temporal_layers;
+ for (size_t si = 0; si < num_spatial_layers; ++si) {
+ codec_.simulcastStream[si].minBitrate = 100 * (1 << si);
+ codec_.simulcastStream[si].targetBitrate = 200 * (1 << si);
+ codec_.simulcastStream[si].maxBitrate = 300 * (1 << si);
+ codec_.simulcastStream[si].active = true;
+ codec_.simulcastStream[si].numberOfTemporalLayers = num_temporal_layers;
+ }
+ }
+
+ for (size_t si = 0; si < num_spatial_layers; ++si) {
+ encoder_info_.fps_allocation[si].resize(num_temporal_layers);
+ double fraction = 1.0;
+ for (int ti = num_temporal_layers - 1; ti >= 0; --ti) {
+ encoder_info_.fps_allocation[si][ti] = static_cast<uint8_t>(
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction * fraction + 0.5);
+ fraction /= 2.0;
+ }
+ }
+
+ adjuster_ = std::make_unique<EncoderBitrateAdjuster>(codec_);
+ adjuster_->OnEncoderInfo(encoder_info_);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ }
+
+ void InsertFrames(std::vector<std::vector<double>> media_utilization_factors,
+ int64_t duration_ms) {
+ InsertFrames(media_utilization_factors, media_utilization_factors,
+ duration_ms);
+ }
+
+ void InsertFrames(
+ std::vector<std::vector<double>> media_utilization_factors,
+ std::vector<std::vector<double>> network_utilization_factors,
+ int64_t duration_ms) {
+ RTC_DCHECK_EQ(media_utilization_factors.size(),
+ network_utilization_factors.size());
+
+ const int64_t start_us = rtc::TimeMicros();
+ while (rtc::TimeMicros() <
+ start_us + (duration_ms * rtc::kNumMicrosecsPerMillisec)) {
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
+ for (size_t si = 0; si < NumSpatialLayers(); ++si) {
+ const std::vector<int>& tl_pattern =
+ kTlPatterns[NumTemporalLayers(si) - 1];
+ const size_t ti =
+ tl_pattern[(tl_pattern_idx_[si]++) % tl_pattern.size()];
+
+ uint32_t layer_bitrate_bps =
+ current_adjusted_allocation_.GetBitrate(si, ti);
+ double layer_framerate_fps = target_framerate_fps_;
+ if (encoder_info_.fps_allocation[si].size() > ti) {
+ uint8_t layer_fps_fraction = encoder_info_.fps_allocation[si][ti];
+ if (ti > 0) {
+ // We're interested in the frame rate for this layer only, not
+ // cumulative frame rate.
+ layer_fps_fraction -= encoder_info_.fps_allocation[si][ti - 1];
+ }
+ layer_framerate_fps =
+ (target_framerate_fps_ * layer_fps_fraction) /
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction;
+ }
+ double media_utilization_factor = 1.0;
+ double network_utilization_factor = 1.0;
+ if (media_utilization_factors.size() > si) {
+ RTC_DCHECK_EQ(media_utilization_factors[si].size(),
+ network_utilization_factors[si].size());
+ if (media_utilization_factors[si].size() > ti) {
+ media_utilization_factor = media_utilization_factors[si][ti];
+ network_utilization_factor = network_utilization_factors[si][ti];
+ }
+ }
+ RTC_DCHECK_GE(network_utilization_factor, media_utilization_factor);
+
+ // Frame size based on constant (media) overshoot.
+ const size_t media_frame_size = media_utilization_factor *
+ (layer_bitrate_bps / 8.0) /
+ layer_framerate_fps;
+
+ constexpr int kFramesWithPenalty = (kSequenceLength / 2) - 1;
+ RTC_DCHECK_GT(kFramesWithPenalty, 0);
+
+ // The positive/negative size diff needed to achieve network rate but
+ // not media rate penalty is the difference between the utilization
+ // factors times the media rate frame size, then scaled by the fraction
+ // between total frames and penalized frames in the sequence.
+ // Cap to media frame size to avoid negative size undershoot.
+ const size_t network_frame_size_diff_bytes = std::min(
+ media_frame_size,
+ static_cast<size_t>(
+ (((network_utilization_factor - media_utilization_factor) *
+ media_frame_size) *
+ kSequenceLength) /
+ kFramesWithPenalty +
+ 0.5));
+
+ int sequence_idx = sequence_idx_[si][ti];
+ sequence_idx_[si][ti] = (sequence_idx_[si][ti] + 1) % kSequenceLength;
+ const DataSize frame_size = DataSize::Bytes(
+ (sequence_idx < kSequenceLength / 2)
+ ? media_frame_size - network_frame_size_diff_bytes
+ : media_frame_size + network_frame_size_diff_bytes);
+
+ adjuster_->OnEncodedFrame(frame_size, si, ti);
+ }
+ }
+ }
+
+ size_t NumSpatialLayers() const {
+ if (codec_.codecType == VideoCodecType::kVideoCodecVP9) {
+ return codec_.VP9().numberOfSpatialLayers;
+ }
+ return codec_.numberOfSimulcastStreams;
+ }
+
+ size_t NumTemporalLayers(int spatial_index) {
+ if (codec_.codecType == VideoCodecType::kVideoCodecVP9) {
+ return codec_.spatialLayers[spatial_index].numberOfTemporalLayers;
+ }
+ return codec_.simulcastStream[spatial_index].numberOfTemporalLayers;
+ }
+
+ void ExpectNear(const VideoBitrateAllocation& expected_allocation,
+ const VideoBitrateAllocation& actual_allocation,
+ double allowed_error_fraction) {
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (expected_allocation.HasBitrate(si, ti)) {
+ EXPECT_TRUE(actual_allocation.HasBitrate(si, ti));
+ uint32_t expected_layer_bitrate_bps =
+ expected_allocation.GetBitrate(si, ti);
+ EXPECT_NEAR(expected_layer_bitrate_bps,
+ actual_allocation.GetBitrate(si, ti),
+ static_cast<uint32_t>(expected_layer_bitrate_bps *
+ allowed_error_fraction));
+ } else {
+ EXPECT_FALSE(actual_allocation.HasBitrate(si, ti));
+ }
+ }
+ }
+ }
+
+ VideoBitrateAllocation MultiplyAllocation(
+ const VideoBitrateAllocation& allocation,
+ double factor) {
+ VideoBitrateAllocation multiplied_allocation;
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (allocation.HasBitrate(si, ti)) {
+ multiplied_allocation.SetBitrate(
+ si, ti,
+ static_cast<uint32_t>(factor * allocation.GetBitrate(si, ti) +
+ 0.5));
+ }
+ }
+ }
+ return multiplied_allocation;
+ }
+
+ VideoCodec codec_;
+ VideoEncoder::EncoderInfo encoder_info_;
+ std::unique_ptr<EncoderBitrateAdjuster> adjuster_;
+ VideoBitrateAllocation current_input_allocation_;
+ VideoBitrateAllocation current_adjusted_allocation_;
+ rtc::ScopedFakeClock clock_;
+ DataRate target_bitrate_;
+ double target_framerate_fps_;
+ int tl_pattern_idx_[kMaxSpatialLayers];
+ int sequence_idx_[kMaxSpatialLayers][kMaxTemporalStreams];
+
+ const std::vector<int> kTlPatterns[kMaxTemporalStreams] = {
+ {0},
+ {0, 1},
+ {0, 2, 1, 2},
+ {0, 3, 2, 3, 1, 3, 2, 3}};
+};
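
The kTlPatterns table fixes the per-frame temporal-layer order the tests use when feeding frames to the adjuster. A standalone sketch of how the three-layer pattern distributes frames (hypothetical code, not the fixture):

#include <cstdio>
#include <vector>

int main() {
  // The three-temporal-layer entry from the table above: TL0 and TL1 each
  // carry a quarter of the frames, TL2 carries half.
  const std::vector<int> pattern = {0, 2, 1, 2};
  int frames_per_layer[3] = {0, 0, 0};
  for (int frame = 0; frame < 8; ++frame) {
    ++frames_per_layer[pattern[frame % pattern.size()]];
  }
  std::printf("TL0=%d TL1=%d TL2=%d\n", frames_per_layer[0],
              frames_per_layer[1], frames_per_layer[2]);  // TL0=2 TL1=2 TL2=4
  return 0;
}
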
+
+TEST_F(EncoderBitrateAdjusterTest, SingleLayerOptimal) {
+ // Single layer, well behaved encoder.
+ current_input_allocation_.SetBitrate(0, 0, 300000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 1, false);
+ InsertFrames({{1.0}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ // Adjusted allocation near input. Allow 1% error margin due to rounding
+ // errors etc.
+ ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.01);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, SingleLayerOveruse) {
+  // Single layer, encoder overshooting by 20%.
+ current_input_allocation_.SetBitrate(0, 0, 300000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 1, false);
+ InsertFrames({{1.2}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ // Adjusted allocation lowered by 20%.
+ ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.2),
+ current_adjusted_allocation_, 0.01);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, SingleLayerUnderuse) {
+  // Single layer, encoder undershooting by 50%.
+ current_input_allocation_.SetBitrate(0, 0, 300000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 1, false);
+ InsertFrames({{0.5}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+  // On undershoot, the adjusted allocation should exactly match the input.
+ ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.00);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, ThreeTemporalLayersOptimalSize) {
+  // Three temporal layers, 60%/20%/20% bitrate distribution, well behaved
+  // encoder.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ current_input_allocation_.SetBitrate(0, 2, 60000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 3, false);
+ InsertFrames({{1.0, 1.0, 1.0}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.01);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, ThreeTemporalLayersOvershoot) {
+  // Three temporal layers, 60%/20%/20% bitrate distribution.
+ // 10% overshoot on all layers.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ current_input_allocation_.SetBitrate(0, 2, 60000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 3, false);
+ InsertFrames({{1.1, 1.1, 1.1}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ // Adjusted allocation lowered by 10%.
+ ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.1),
+ current_adjusted_allocation_, 0.01);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, ThreeTemporalLayersUndershoot) {
+  // Three temporal layers, 60%/20%/20% bitrate distribution, undershoot on
+  // all layers.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ current_input_allocation_.SetBitrate(0, 2, 60000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 3, false);
+ InsertFrames({{0.8, 0.8, 0.8}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ // Adjusted allocation identical since we don't boost bitrates.
+ ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.0);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, ThreeTemporalLayersSkewedOvershoot) {
+  // Three temporal layers, 60%/20%/20% bitrate distribution.
+ // 10% overshoot on base layer, 20% on higher layers.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ current_input_allocation_.SetBitrate(0, 2, 60000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 3, false);
+ InsertFrames({{1.1, 1.2, 1.2}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ // Expected overshoot is weighted by bitrate:
+ // (0.6 * 1.1 + 0.2 * 1.2 + 0.2 * 1.2) = 1.14
+ ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.14),
+ current_adjusted_allocation_, 0.01);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, ThreeTemporalLayersNonLayeredEncoder) {
+  // Three temporal layers, 60%/20%/20% bitrate allocation, 10% overshoot,
+ // encoder does not actually support temporal layers.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ current_input_allocation_.SetBitrate(0, 2, 60000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 1, false);
+ InsertFrames({{1.1}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ // Expect the actual 10% overuse to be detected and the allocation to
+ // only contain the one entry.
+ VideoBitrateAllocation expected_allocation;
+ expected_allocation.SetBitrate(
+ 0, 0,
+ static_cast<uint32_t>(current_input_allocation_.get_sum_bps() / 1.10));
+ ExpectNear(expected_allocation, current_adjusted_allocation_, 0.01);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, IgnoredStream) {
+  // Encoder with three temporal layers, but in a mode that does not support
+  // deterministic frame rates. Such streams are ignored, even if their
+  // bitrate overshoots.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ target_framerate_fps_ = 30;
+ SetUpAdjuster(1, 1, false);
+ encoder_info_.fps_allocation[0].clear();
+ adjuster_->OnEncoderInfo(encoder_info_);
+
+ InsertFrames({{1.1}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+
+ // Values passed through.
+ ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.00);
+}
+
+TEST_F(EncoderBitrateAdjusterTest, DifferentSpatialOvershoots) {
+ // Two streams, both with three temporal layers.
+ // S0 has 5% overshoot, S1 has 25% overshoot.
+ current_input_allocation_.SetBitrate(0, 0, 180000);
+ current_input_allocation_.SetBitrate(0, 1, 60000);
+ current_input_allocation_.SetBitrate(0, 2, 60000);
+ current_input_allocation_.SetBitrate(1, 0, 400000);
+ current_input_allocation_.SetBitrate(1, 1, 150000);
+ current_input_allocation_.SetBitrate(1, 2, 150000);
+ target_framerate_fps_ = 30;
+ // Run twice, once configured as simulcast and once as VP9 SVC.
+ for (int i = 0; i < 2; ++i) {
+ SetUpAdjuster(2, 3, i == 0);
+ InsertFrames({{1.05, 1.05, 1.05}, {1.25, 1.25, 1.25}}, kWindowSizeMs);
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ VideoBitrateAllocation expected_allocation;
+ for (size_t ti = 0; ti < 3; ++ti) {
+ expected_allocation.SetBitrate(
+ 0, ti,
+ static_cast<uint32_t>(current_input_allocation_.GetBitrate(0, ti) /
+ 1.05));
+ expected_allocation.SetBitrate(
+ 1, ti,
+ static_cast<uint32_t>(current_input_allocation_.GetBitrate(1, ti) /
+ 1.25));
+ }
+ ExpectNear(expected_allocation, current_adjusted_allocation_, 0.01);
+ }
+}
+
+TEST_F(EncoderBitrateAdjusterTest, HeadroomAllowsOvershootToMediaRate) {
+ // Two streams, both with three temporal layers.
+ // Media rate is 1.0, but network rate is higher.
+ ScopedFieldTrials field_trial(
+ "WebRTC-VideoRateControl/adjuster_use_headroom:true/");
+
+ const uint32_t kS0Bitrate = 300000;
+ const uint32_t kS1Bitrate = 900000;
+ current_input_allocation_.SetBitrate(0, 0, kS0Bitrate / 3);
+ current_input_allocation_.SetBitrate(0, 1, kS0Bitrate / 3);
+ current_input_allocation_.SetBitrate(0, 2, kS0Bitrate / 3);
+ current_input_allocation_.SetBitrate(1, 0, kS1Bitrate / 3);
+ current_input_allocation_.SetBitrate(1, 1, kS1Bitrate / 3);
+ current_input_allocation_.SetBitrate(1, 2, kS1Bitrate / 3);
+
+ target_framerate_fps_ = 30;
+
+ // Run twice, once configured as simulcast and once as VP9 SVC.
+ for (int i = 0; i < 2; ++i) {
+ SetUpAdjuster(2, 3, i == 0);
+ // Network rate has 10% overshoot, but media rate is correct at 1.0.
+ InsertFrames({{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
+ {{1.1, 1.1, 1.1}, {1.1, 1.1, 1.1}},
+ kWindowSizeMs * kSequenceLength);
+
+ // Push back by 10%.
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+ ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.1),
+ current_adjusted_allocation_, 0.01);
+
+ // Add 10% link headroom, overshoot is now allowed.
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_,
+ DataRate::BitsPerSec(current_input_allocation_.get_sum_bps() *
+ 1.1)));
+ ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.01);
+ }
+}
+
+TEST_F(EncoderBitrateAdjusterTest, DontExceedMediaRateEvenWithHeadroom) {
+ // Two streams, both with three temporal layers.
+ // Media rate is 1.1, but network rate is higher.
+ ScopedFieldTrials field_trial(
+ "WebRTC-VideoRateControl/adjuster_use_headroom:true/");
+
+ const uint32_t kS0Bitrate = 300000;
+ const uint32_t kS1Bitrate = 900000;
+ current_input_allocation_.SetBitrate(0, 0, kS0Bitrate / 3);
+ current_input_allocation_.SetBitrate(0, 1, kS0Bitrate / 3);
+ current_input_allocation_.SetBitrate(0, 2, kS0Bitrate / 3);
+ current_input_allocation_.SetBitrate(1, 0, kS1Bitrate / 3);
+ current_input_allocation_.SetBitrate(1, 1, kS1Bitrate / 3);
+ current_input_allocation_.SetBitrate(1, 2, kS1Bitrate / 3);
+
+ target_framerate_fps_ = 30;
+
+ // Run twice, once configured as simulcast and once as VP9 SVC.
+ for (int i = 0; i < 2; ++i) {
+ SetUpAdjuster(2, 3, i == 0);
+ // Network rate has 30% overshoot, media rate has 10% overshoot.
+ InsertFrames({{1.1, 1.1, 1.1}, {1.1, 1.1, 1.1}},
+ {{1.3, 1.3, 1.3}, {1.3, 1.3, 1.3}},
+ kWindowSizeMs * kSequenceLength);
+
+ // Push back by 30%.
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_));
+    // The up-down pattern causes a bit more noise, so allow a slightly
+    // larger error margin.
+ ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.3),
+ current_adjusted_allocation_, 0.015);
+
+ // Add 100% link headroom, overshoot from network to media rate is allowed.
+ current_adjusted_allocation_ =
+ adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
+ current_input_allocation_, target_framerate_fps_,
+ DataRate::BitsPerSec(current_input_allocation_.get_sum_bps() * 2)));
+ ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.1),
+ current_adjusted_allocation_, 0.015);
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/encoder_overshoot_detector.cc b/third_party/libwebrtc/video/encoder_overshoot_detector.cc
new file mode 100644
index 0000000000..80b2ec12b0
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_overshoot_detector.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_overshoot_detector.h"
+
+#include <algorithm>
+
+namespace webrtc {
+namespace {
+// The buffer level for media-rate utilization is allowed to go below zero,
+// down to
+// -(`kMaxMediaUnderrunFrames` / `target_framerate_fps_`) * `target_bitrate_`.
+static constexpr double kMaxMediaUnderrunFrames = 5.0;
+} // namespace
+
+EncoderOvershootDetector::EncoderOvershootDetector(int64_t window_size_ms)
+ : window_size_ms_(window_size_ms),
+ time_last_update_ms_(-1),
+ sum_network_utilization_factors_(0.0),
+ sum_media_utilization_factors_(0.0),
+ target_bitrate_(DataRate::Zero()),
+ target_framerate_fps_(0),
+ network_buffer_level_bits_(0),
+ media_buffer_level_bits_(0) {}
+
+EncoderOvershootDetector::~EncoderOvershootDetector() = default;
+
+void EncoderOvershootDetector::SetTargetRate(DataRate target_bitrate,
+ double target_framerate_fps,
+ int64_t time_ms) {
+ // First leak bits according to the previous target rate.
+ if (target_bitrate_ != DataRate::Zero()) {
+ LeakBits(time_ms);
+ } else if (target_bitrate != DataRate::Zero()) {
+ // Stream was just enabled, reset state.
+ time_last_update_ms_ = time_ms;
+ utilization_factors_.clear();
+ sum_network_utilization_factors_ = 0.0;
+ sum_media_utilization_factors_ = 0.0;
+ network_buffer_level_bits_ = 0;
+ media_buffer_level_bits_ = 0;
+ }
+
+ target_bitrate_ = target_bitrate;
+ target_framerate_fps_ = target_framerate_fps;
+}
+
+void EncoderOvershootDetector::OnEncodedFrame(size_t bytes, int64_t time_ms) {
+ // Leak bits from the virtual pacer buffer, according to the current target
+ // bitrate.
+ LeakBits(time_ms);
+
+ // Ideal size of a frame given the current rates.
+ const int64_t ideal_frame_size_bits = IdealFrameSizeBits();
+ if (ideal_frame_size_bits == 0) {
+ // Frame without updated bitrate and/or framerate, ignore it.
+ return;
+ }
+
+ const double network_utilization_factor = HandleEncodedFrame(
+ bytes * 8, ideal_frame_size_bits, time_ms, &network_buffer_level_bits_);
+ const double media_utilization_factor = HandleEncodedFrame(
+ bytes * 8, ideal_frame_size_bits, time_ms, &media_buffer_level_bits_);
+
+ sum_network_utilization_factors_ += network_utilization_factor;
+ sum_media_utilization_factors_ += media_utilization_factor;
+
+ utilization_factors_.emplace_back(network_utilization_factor,
+ media_utilization_factor, time_ms);
+}
+
+double EncoderOvershootDetector::HandleEncodedFrame(
+ size_t frame_size_bits,
+ int64_t ideal_frame_size_bits,
+ int64_t time_ms,
+ int64_t* buffer_level_bits) const {
+ // Add new frame to the buffer level. If doing so exceeds the ideal buffer
+ // size, penalize this frame but cap overshoot to current buffer level rather
+ // than size of this frame. This is done so that a single large frame is not
+ // penalized if the encoder afterwards compensates by dropping frames and/or
+ // reducing frame size. If however a large frame is followed by more data,
+ // we cannot pace that next frame out within one frame space.
+ const int64_t bitsum = frame_size_bits + *buffer_level_bits;
+ int64_t overshoot_bits = 0;
+ if (bitsum > ideal_frame_size_bits) {
+ overshoot_bits =
+ std::min(*buffer_level_bits, bitsum - ideal_frame_size_bits);
+ }
+
+  // Add an entry for the (over) utilization of this frame. The factor is
+  // floored at 1.0 so that transient undershoot cannot drag the average
+  // down and mask overshoot elsewhere in the window.
+  double utilization_factor;
+  if (utilization_factors_.empty()) {
+    // First frame: there is no previous frame to estimate overshoot from,
+    // so just use this frame's size relative to the ideal size.
+ utilization_factor = std::max(
+ 1.0, static_cast<double>(frame_size_bits) / ideal_frame_size_bits);
+ } else {
+ utilization_factor =
+ 1.0 + (static_cast<double>(overshoot_bits) / ideal_frame_size_bits);
+ }
+
+ // Remove the overshot bits from the virtual buffer so we don't penalize
+ // those bits multiple times.
+ *buffer_level_bits -= overshoot_bits;
+ *buffer_level_bits += frame_size_bits;
+
+ return utilization_factor;
+}
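
A worked pass through the buffer update above, with concrete numbers (an ideal frame size of 10000 bits corresponds to 300 kbps at 30 fps):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t ideal_bits = 10000;  // (300000 + 15) / 30, as in the source.
  int64_t buffer_bits = 2000;        // Bits still queued from earlier frames.
  const int64_t frame_bits = 12000;  // The frame just encoded.

  const int64_t bitsum = frame_bits + buffer_bits;  // 14000 > 10000.
  // Overshoot is capped at the current buffer level, so a single large frame
  // arriving at an empty buffer is not penalized.
  const int64_t overshoot =
      bitsum > ideal_bits ? std::min(buffer_bits, bitsum - ideal_bits) : 0;
  const double utilization =
      1.0 + static_cast<double>(overshoot) / ideal_bits;  // 1.2
  buffer_bits += frame_bits - overshoot;                  // 12000 bits left.
  std::printf("overshoot=%lld utilization=%.2f buffer=%lld\n",
              static_cast<long long>(overshoot), utilization,
              static_cast<long long>(buffer_bits));
  return 0;
}
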
+
+absl::optional<double>
+EncoderOvershootDetector::GetNetworkRateUtilizationFactor(int64_t time_ms) {
+ CullOldUpdates(time_ms);
+
+ // No data points within window, return.
+ if (utilization_factors_.empty()) {
+ return absl::nullopt;
+ }
+
+ // TODO(sprang): Consider changing from arithmetic mean to some other
+ // function such as 90th percentile.
+ return sum_network_utilization_factors_ / utilization_factors_.size();
+}
+
+absl::optional<double> EncoderOvershootDetector::GetMediaRateUtilizationFactor(
+ int64_t time_ms) {
+ CullOldUpdates(time_ms);
+
+ // No data points within window, return.
+ if (utilization_factors_.empty()) {
+ return absl::nullopt;
+ }
+
+ return sum_media_utilization_factors_ / utilization_factors_.size();
+}
+
+void EncoderOvershootDetector::Reset() {
+ time_last_update_ms_ = -1;
+ utilization_factors_.clear();
+ target_bitrate_ = DataRate::Zero();
+ sum_network_utilization_factors_ = 0.0;
+ sum_media_utilization_factors_ = 0.0;
+ target_framerate_fps_ = 0.0;
+ network_buffer_level_bits_ = 0;
+ media_buffer_level_bits_ = 0;
+}
+
+int64_t EncoderOvershootDetector::IdealFrameSizeBits() const {
+ if (target_framerate_fps_ <= 0 || target_bitrate_ == DataRate::Zero()) {
+ return 0;
+ }
+
+  // Current ideal frame size, based on the current target bitrate and
+  // rounded to the nearest integer number of bits.
+ return static_cast<int64_t>(
+ (target_bitrate_.bps() + target_framerate_fps_ / 2) /
+ target_framerate_fps_);
+}
+
+void EncoderOvershootDetector::LeakBits(int64_t time_ms) {
+ if (time_last_update_ms_ != -1 && target_bitrate_ > DataRate::Zero()) {
+ int64_t time_delta_ms = time_ms - time_last_update_ms_;
+ // Leak bits according to the current target bitrate.
+ const int64_t leaked_bits = (target_bitrate_.bps() * time_delta_ms) / 1000;
+
+ // Network buffer may not go below zero.
+ network_buffer_level_bits_ =
+ std::max<int64_t>(0, network_buffer_level_bits_ - leaked_bits);
+
+    // The media buffer may go down to minus `kMaxMediaUnderrunFrames` frames'
+    // worth of data.
+ const double max_underrun_seconds =
+ std::min(kMaxMediaUnderrunFrames, target_framerate_fps_) /
+ target_framerate_fps_;
+ media_buffer_level_bits_ = std::max<int64_t>(
+ -max_underrun_seconds * target_bitrate_.bps<int64_t>(),
+ media_buffer_level_bits_ - leaked_bits);
+ }
+ time_last_update_ms_ = time_ms;
+}
+
+void EncoderOvershootDetector::CullOldUpdates(int64_t time_ms) {
+ // Cull old data points.
+ const int64_t cutoff_time_ms = time_ms - window_size_ms_;
+ while (!utilization_factors_.empty() &&
+ utilization_factors_.front().update_time_ms < cutoff_time_ms) {
+    // Make sure the sum is never allowed to become negative due to rounding
+    // errors.
+ sum_network_utilization_factors_ = std::max(
+ 0.0, sum_network_utilization_factors_ -
+ utilization_factors_.front().network_utilization_factor);
+ sum_media_utilization_factors_ = std::max(
+ 0.0, sum_media_utilization_factors_ -
+ utilization_factors_.front().media_utilization_factor);
+ utilization_factors_.pop_front();
+ }
+}
+
+} // namespace webrtc
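As a sanity check on the virtual-buffer arithmetic above, here is a minimal
standalone sketch that replays the steady state for the default
300 kbps / 15 fps target (an ideal frame size of 20000 bits) with a constant
20% overshoot. It is illustrative only: the real detector special-cases the
very first frame and leaks bits by wall-clock time rather than once per frame.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t ideal_bits = 20000;  // 300000 bps / 15 fps.
      const int64_t frame_bits = 24000;  // Constant 20% overshoot.
      int64_t buffer_bits = 0;           // Virtual network buffer level.
      for (int i = 0; i < 4; ++i) {
        const int64_t sum = frame_bits + buffer_bits;
        const int64_t overshoot =
            sum > ideal_bits ? std::min(buffer_bits, sum - ideal_bits) : 0;
        const double factor =
            1.0 + static_cast<double>(overshoot) / ideal_bits;
        buffer_bits += frame_bits - overshoot;
        // Leak one frame interval's worth of bits, floored at zero.
        buffer_bits = std::max<int64_t>(0, buffer_bits - ideal_bits);
        std::printf("frame %d: factor %.2f, buffer %lld bits\n", i, factor,
                    static_cast<long long>(buffer_bits));
      }
      // Prints 1.00 for the first frame (empty buffer), then settles at 1.20,
      // matching the Overshoot unit test further down in this patch.
      return 0;
    }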
diff --git a/third_party/libwebrtc/video/encoder_overshoot_detector.h b/third_party/libwebrtc/video/encoder_overshoot_detector.h
new file mode 100644
index 0000000000..1f8908e54f
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_overshoot_detector.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ENCODER_OVERSHOOT_DETECTOR_H_
+#define VIDEO_ENCODER_OVERSHOOT_DETECTOR_H_
+
+#include <deque>
+
+#include "absl/types/optional.h"
+#include "api/units/data_rate.h"
+
+namespace webrtc {
+
+class EncoderOvershootDetector {
+ public:
+ explicit EncoderOvershootDetector(int64_t window_size_ms);
+ ~EncoderOvershootDetector();
+
+ void SetTargetRate(DataRate target_bitrate,
+ double target_framerate_fps,
+ int64_t time_ms);
+ // A frame has been encoded or dropped. `bytes` == 0 indicates a drop.
+ void OnEncodedFrame(size_t bytes, int64_t time_ms);
+  // This utilization factor reaches 1.0 only if the encoder produces encoded
+  // frames in such a way that they can be sent onto the network at
+  // `target_bitrate` without queues building up.
+ absl::optional<double> GetNetworkRateUtilizationFactor(int64_t time_ms);
+ // This utilization factor is based just on actual encoded frame sizes in
+ // relation to ideal sizes. An undershoot may be compensated by an
+ // overshoot so that the average over time is close to `target_bitrate`.
+ absl::optional<double> GetMediaRateUtilizationFactor(int64_t time_ms);
+ void Reset();
+
+ private:
+ int64_t IdealFrameSizeBits() const;
+ void LeakBits(int64_t time_ms);
+ void CullOldUpdates(int64_t time_ms);
+  // Updates the provided buffer level, checks whether overuse occurs, and
+  // returns the calculated utilization factor for this frame.
+ double HandleEncodedFrame(size_t frame_size_bits,
+ int64_t ideal_frame_size_bits,
+ int64_t time_ms,
+ int64_t* buffer_level_bits) const;
+
+ const int64_t window_size_ms_;
+ int64_t time_last_update_ms_;
+ struct BitrateUpdate {
+ BitrateUpdate(double network_utilization_factor,
+ double media_utilization_factor,
+ int64_t update_time_ms)
+ : network_utilization_factor(network_utilization_factor),
+ media_utilization_factor(media_utilization_factor),
+ update_time_ms(update_time_ms) {}
+    // The utilization factor based on the strict network rate.
+    double network_utilization_factor;
+    // The utilization factor based on the average media rate.
+ double media_utilization_factor;
+ int64_t update_time_ms;
+ };
+ std::deque<BitrateUpdate> utilization_factors_;
+ double sum_network_utilization_factors_;
+ double sum_media_utilization_factors_;
+ DataRate target_bitrate_;
+ double target_framerate_fps_;
+ int64_t network_buffer_level_bits_;
+ int64_t media_buffer_level_bits_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ENCODER_OVERSHOOT_DETECTOR_H_
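A hypothetical caller of the interface above might look like the sketch
below. `OnFrameEncoded` and the scale-down note are assumptions about how an
embedder could consume the factor, not names or behavior from this patch.

    #include "absl/types/optional.h"
    #include "video/encoder_overshoot_detector.h"

    void OnFrameEncoded(webrtc::EncoderOvershootDetector& detector,
                        size_t encoded_bytes,
                        int64_t now_ms) {
      detector.OnEncodedFrame(encoded_bytes, now_ms);
      if (absl::optional<double> factor =
              detector.GetNetworkRateUtilizationFactor(now_ms)) {
        // A factor persistently above 1.0 means frames cannot be paced out
        // at the target rate without a growing queue; a rate controller
        // could compensate by scaling the encoder target by 1.0 / *factor.
      }
    }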
diff --git a/third_party/libwebrtc/video/encoder_overshoot_detector_unittest.cc b/third_party/libwebrtc/video/encoder_overshoot_detector_unittest.cc
new file mode 100644
index 0000000000..a3c44eb013
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_overshoot_detector_unittest.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_overshoot_detector.h"
+
+#include "api/units/data_rate.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/time_utils.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class EncoderOvershootDetectorTest : public ::testing::Test {
+ public:
+ static constexpr int kDefaultBitrateBps = 300000;
+ static constexpr double kDefaultFrameRateFps = 15;
+ EncoderOvershootDetectorTest()
+ : detector_(kWindowSizeMs),
+ target_bitrate_(DataRate::BitsPerSec(kDefaultBitrateBps)),
+ target_framerate_fps_(kDefaultFrameRateFps) {}
+
+ protected:
+ void RunConstantUtilizationTest(double actual_utilization_factor,
+ double expected_utilization_factor,
+ double allowed_error,
+ int64_t test_duration_ms) {
+ const int frame_size_bytes =
+ static_cast<int>(actual_utilization_factor *
+ (target_bitrate_.bps() / target_framerate_fps_) / 8);
+ detector_.SetTargetRate(target_bitrate_, target_framerate_fps_,
+ rtc::TimeMillis());
+
+ if (rtc::TimeMillis() == 0) {
+      // Encode a first frame, which by definition is not penalized for
+      // overshoot carried over from a previous frame.
+ detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
+ }
+
+ int64_t runtime_us = 0;
+ while (runtime_us < test_duration_ms * 1000) {
+ detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
+ runtime_us += rtc::kNumMicrosecsPerSec / target_framerate_fps_;
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
+ }
+
+ // At constant utilization, both network and media utilization should be
+ // close to expected.
+ const absl::optional<double> network_utilization_factor =
+ detector_.GetNetworkRateUtilizationFactor(rtc::TimeMillis());
+ EXPECT_NEAR(network_utilization_factor.value_or(-1),
+ expected_utilization_factor, allowed_error);
+
+ const absl::optional<double> media_utilization_factor =
+ detector_.GetMediaRateUtilizationFactor(rtc::TimeMillis());
+ EXPECT_NEAR(media_utilization_factor.value_or(-1),
+ expected_utilization_factor, allowed_error);
+ }
+
+ static constexpr int64_t kWindowSizeMs = 3000;
+ EncoderOvershootDetector detector_;
+ rtc::ScopedFakeClock clock_;
+ DataRate target_bitrate_;
+ double target_framerate_fps_;
+};
+
+TEST_F(EncoderOvershootDetectorTest, NoUtilizationIfNoRate) {
+ const int frame_size_bytes = 1000;
+ const int64_t time_interval_ms = 33;
+ detector_.SetTargetRate(target_bitrate_, target_framerate_fps_,
+ rtc::TimeMillis());
+
+ // No data points, can't determine overshoot rate.
+ EXPECT_FALSE(
+ detector_.GetNetworkRateUtilizationFactor(rtc::TimeMillis()).has_value());
+
+ detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
+ clock_.AdvanceTime(TimeDelta::Millis(time_interval_ms));
+ EXPECT_TRUE(
+ detector_.GetNetworkRateUtilizationFactor(rtc::TimeMillis()).has_value());
+}
+
+TEST_F(EncoderOvershootDetectorTest, OptimalSize) {
+ // Optimally behaved encoder.
+  // Allow some error margin due to rounding errors, e.g. the frame interval
+  // not being an integer number of milliseconds.
+ RunConstantUtilizationTest(1.0, 1.0, 0.01, kWindowSizeMs);
+}
+
+TEST_F(EncoderOvershootDetectorTest, Undershoot) {
+  // Undershoot: the reported utilization factor should be floored at 1.0 so
+  // that we don't incorrectly boost the encoder bitrate during movement.
+ RunConstantUtilizationTest(0.5, 1.0, 0.00, kWindowSizeMs);
+}
+
+TEST_F(EncoderOvershootDetectorTest, Overshoot) {
+ // Overshoot by 20%.
+ // Allow some error margin due to rounding errors.
+ RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs);
+}
+
+TEST_F(EncoderOvershootDetectorTest, ConstantOvershootVaryingRates) {
+ // Overshoot by 20%, but vary framerate and bitrate.
+ // Allow some error margin due to rounding errors.
+ RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs);
+ target_framerate_fps_ /= 2;
+ RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs / 2);
+ target_bitrate_ = DataRate::BitsPerSec(target_bitrate_.bps() / 2);
+ RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs / 2);
+}
+
+TEST_F(EncoderOvershootDetectorTest, ConstantRateVaryingOvershoot) {
+ // Overshoot by 10%, keep framerate and bitrate constant.
+ // Allow some error margin due to rounding errors.
+ RunConstantUtilizationTest(1.1, 1.1, 0.01, kWindowSizeMs);
+ // Change overshoot to 20%, run for half window and expect overshoot
+ // to be 15%.
+ RunConstantUtilizationTest(1.2, 1.15, 0.01, kWindowSizeMs / 2);
+ // Keep running at 20% overshoot, after window is full that should now
+ // be the reported overshoot.
+ RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs / 2);
+}
+
+TEST_F(EncoderOvershootDetectorTest, PartialOvershoot) {
+ const int ideal_frame_size_bytes =
+ (target_bitrate_.bps() / target_framerate_fps_) / 8;
+ detector_.SetTargetRate(target_bitrate_, target_framerate_fps_,
+ rtc::TimeMillis());
+
+ // Test scenario with average bitrate matching the target bitrate, but
+ // with some utilization factor penalty as the frames can't be paced out
+ // on the network at the target rate.
+  // Insert a series of four frames:
+  // 1) 20% overshoot, not penalized since the buffer is empty.
+  // 2) 20% overshoot; the 20% overshoot from the first frame is penalized.
+  // 3) 20% undershoot, negating the overshoot from the previous frame.
+  // 4) 20% undershoot, no penalty.
+  // The utilization penalty is thus 5% on average.
+
+ int64_t runtime_us = 0;
+ int i = 0;
+ while (runtime_us < kWindowSizeMs * rtc::kNumMicrosecsPerMillisec) {
+ runtime_us += rtc::kNumMicrosecsPerSec / target_framerate_fps_;
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
+ int frame_size_bytes = (i++ % 4 < 2) ? (ideal_frame_size_bytes * 120) / 100
+ : (ideal_frame_size_bytes * 80) / 100;
+ detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
+ }
+
+ // Expect 5% overshoot for network rate, see above.
+ const absl::optional<double> network_utilization_factor =
+ detector_.GetNetworkRateUtilizationFactor(rtc::TimeMillis());
+ EXPECT_NEAR(network_utilization_factor.value_or(-1), 1.05, 0.01);
+
+  // Expect the media rate to be correct on average.
+ const absl::optional<double> media_utilization_factor =
+ detector_.GetMediaRateUtilizationFactor(rtc::TimeMillis());
+ EXPECT_NEAR(media_utilization_factor.value_or(-1), 1.00, 0.01);
+}
+} // namespace webrtc
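For reference, the PartialOvershoot expectation can be checked by hand. With
the default 300 kbps / 15 fps targets, the ideal frame size and the
per-interval leak are both 20000 bits, and the four-frame pattern settles
into this steady-state cycle (network buffer shown after the leak):

    frame 1, 24000 bits: buffer 0    -> overshoot 0,    factor 1.00, buffer 4000
    frame 2, 24000 bits: buffer 4000 -> overshoot 4000, factor 1.20, buffer 4000
    frame 3, 16000 bits: buffer 4000 -> overshoot 0,    factor 1.00, buffer 0
    frame 4, 16000 bits: buffer 0    -> overshoot 0,    factor 1.00, buffer 0

The average is (1.00 + 1.20 + 1.00 + 1.00) / 4 = 1.05, the expected network
utilization factor; the special-cased first frame contributes 1.20, which the
0.01 tolerance absorbs. The media buffer, which is allowed to run negative,
absorbs the undershoot frames, so the media factor settles at 1.00.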
diff --git a/third_party/libwebrtc/video/encoder_rtcp_feedback.cc b/third_party/libwebrtc/video/encoder_rtcp_feedback.cc
new file mode 100644
index 0000000000..ebba41e807
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_rtcp_feedback.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_rtcp_feedback.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/keyframe_interval_settings.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kMinKeyframeSendIntervalMs = 300;
+} // namespace
+
+EncoderRtcpFeedback::EncoderRtcpFeedback(
+ Clock* clock,
+ const std::vector<uint32_t>& ssrcs,
+ VideoStreamEncoderInterface* encoder,
+ std::function<std::vector<RtpSequenceNumberMap::Info>(
+ uint32_t ssrc,
+ const std::vector<uint16_t>& seq_nums)> get_packet_infos)
+ : clock_(clock),
+ ssrcs_(ssrcs),
+ get_packet_infos_(std::move(get_packet_infos)),
+ video_stream_encoder_(encoder),
+ time_last_packet_delivery_queue_(Timestamp::Zero()),
+ min_keyframe_send_interval_(
+ TimeDelta::Millis(KeyframeIntervalSettings::ParseFromFieldTrials()
+ .MinKeyframeSendIntervalMs()
+ .value_or(kMinKeyframeSendIntervalMs))) {
+ RTC_DCHECK(!ssrcs.empty());
+ packet_delivery_queue_.Detach();
+}
+
+// Called via Call::DeliverRtcp.
+void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) {
+ RTC_DCHECK_RUN_ON(&packet_delivery_queue_);
+ RTC_DCHECK(std::find(ssrcs_.begin(), ssrcs_.end(), ssrc) != ssrcs_.end());
+
+ const Timestamp now = clock_->CurrentTime();
+ if (time_last_packet_delivery_queue_ + min_keyframe_send_interval_ > now)
+ return;
+
+ time_last_packet_delivery_queue_ = now;
+
+  // Always produce a key frame for all streams.
+ video_stream_encoder_->SendKeyFrame();
+}
+
+void EncoderRtcpFeedback::OnReceivedLossNotification(
+ uint32_t ssrc,
+ uint16_t seq_num_of_last_decodable,
+ uint16_t seq_num_of_last_received,
+ bool decodability_flag) {
+ RTC_DCHECK(get_packet_infos_) << "Object initialization incomplete.";
+
+ const std::vector<uint16_t> seq_nums = {seq_num_of_last_decodable,
+ seq_num_of_last_received};
+ const std::vector<RtpSequenceNumberMap::Info> infos =
+ get_packet_infos_(ssrc, seq_nums);
+ if (infos.empty()) {
+ return;
+ }
+ RTC_DCHECK_EQ(infos.size(), 2u);
+
+ const RtpSequenceNumberMap::Info& last_decodable = infos[0];
+ const RtpSequenceNumberMap::Info& last_received = infos[1];
+
+ VideoEncoder::LossNotification loss_notification;
+ loss_notification.timestamp_of_last_decodable = last_decodable.timestamp;
+ loss_notification.timestamp_of_last_received = last_received.timestamp;
+
+ // Deduce decodability of the last received frame and of its dependencies.
+ if (last_received.is_first && last_received.is_last) {
+ // The frame consists of a single packet, and that packet has evidently
+ // been received in full; the frame is therefore assemblable.
+ // In this case, the decodability of the dependencies is communicated by
+ // the decodability flag, and the frame itself is decodable if and only
+ // if they are decodable.
+ loss_notification.dependencies_of_last_received_decodable =
+ decodability_flag;
+ loss_notification.last_received_decodable = decodability_flag;
+ } else if (last_received.is_first && !last_received.is_last) {
+ // In this case, the decodability flag communicates the decodability of
+ // the dependencies. If any is undecodable, we also know that the frame
+ // itself will not be decodable; if all are decodable, the frame's own
+ // decodability will remain unknown, as not all of its packets have
+ // been received.
+ loss_notification.dependencies_of_last_received_decodable =
+ decodability_flag;
+ loss_notification.last_received_decodable =
+ !decodability_flag ? absl::make_optional(false) : absl::nullopt;
+ } else if (!last_received.is_first && last_received.is_last) {
+ if (decodability_flag) {
+ // The frame has been received in full, and found to be decodable.
+ // (Messages of this type are not sent by WebRTC at the moment, but are
+ // theoretically possible, for example for serving as acks.)
+ loss_notification.dependencies_of_last_received_decodable = true;
+ loss_notification.last_received_decodable = true;
+ } else {
+ // It is impossible to tell whether some dependencies were undecodable,
+ // or whether the frame was unassemblable, but in either case, the frame
+ // itself was undecodable.
+ loss_notification.dependencies_of_last_received_decodable = absl::nullopt;
+ loss_notification.last_received_decodable = false;
+ }
+ } else { // !last_received.is_first && !last_received.is_last
+ if (decodability_flag) {
+ // The frame has not yet been received in full, but no gaps have
+ // been encountered so far, and the dependencies were all decodable.
+ // (Messages of this type are not sent by WebRTC at the moment, but are
+ // theoretically possible, for example for serving as acks.)
+ loss_notification.dependencies_of_last_received_decodable = true;
+ loss_notification.last_received_decodable = absl::nullopt;
+ } else {
+ // It is impossible to tell whether some dependencies were undecodable,
+ // or whether the frame was unassemblable, but in either case, the frame
+ // itself was undecodable.
+ loss_notification.dependencies_of_last_received_decodable = absl::nullopt;
+ loss_notification.last_received_decodable = false;
+ }
+ }
+
+ video_stream_encoder_->OnLossNotification(loss_notification);
+}
+
+} // namespace webrtc
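The four branches above collapse to a small truth table over
(is_first, is_last, decodability_flag). This restatement is for orientation
only; "unknown" stands for absl::nullopt:

    is_first  is_last  flag   ->  dependencies_decodable  last_received_decodable
    true      true     d          d                       d
    true      false    d          d                       d ? unknown : false
    false     true     true       true                    true
    false     true     false      unknown                 false
    false     false    true       true                    unknown
    false     false    false      unknown                 false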
diff --git a/third_party/libwebrtc/video/encoder_rtcp_feedback.h b/third_party/libwebrtc/video/encoder_rtcp_feedback.h
new file mode 100644
index 0000000000..c66a94503e
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_rtcp_feedback.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_ENCODER_RTCP_FEEDBACK_H_
+#define VIDEO_ENCODER_RTCP_FEEDBACK_H_
+
+#include <functional>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "call/rtp_video_sender_interface.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "system_wrappers/include/clock.h"
+#include "video/video_stream_encoder_interface.h"
+
+namespace webrtc {
+
+class VideoStreamEncoderInterface;
+
+// This class forwards RTCP feedback (such as key frame requests and loss
+// notifications) from the RtpRtcp module to the video stream encoder.
+class EncoderRtcpFeedback : public RtcpIntraFrameObserver,
+ public RtcpLossNotificationObserver {
+ public:
+ EncoderRtcpFeedback(
+ Clock* clock,
+ const std::vector<uint32_t>& ssrcs,
+ VideoStreamEncoderInterface* encoder,
+ std::function<std::vector<RtpSequenceNumberMap::Info>(
+ uint32_t ssrc,
+ const std::vector<uint16_t>& seq_nums)> get_packet_infos);
+ ~EncoderRtcpFeedback() override = default;
+
+ void OnReceivedIntraFrameRequest(uint32_t ssrc) override;
+
+ // Implements RtcpLossNotificationObserver.
+ void OnReceivedLossNotification(uint32_t ssrc,
+ uint16_t seq_num_of_last_decodable,
+ uint16_t seq_num_of_last_received,
+ bool decodability_flag) override;
+
+ private:
+ Clock* const clock_;
+ const std::vector<uint32_t> ssrcs_;
+ const std::function<std::vector<RtpSequenceNumberMap::Info>(
+ uint32_t ssrc,
+ const std::vector<uint16_t>& seq_nums)>
+ get_packet_infos_;
+ VideoStreamEncoderInterface* const video_stream_encoder_;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_delivery_queue_;
+ Timestamp time_last_packet_delivery_queue_
+ RTC_GUARDED_BY(packet_delivery_queue_);
+
+ const TimeDelta min_keyframe_send_interval_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ENCODER_RTCP_FEEDBACK_H_
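A hypothetical wiring sketch for the class above; the lambda assumes the
sender exposes GetSentRtpPacketInfos() as in the upstream
RtpVideoSenderInterface, and the SSRC value is a placeholder.

    #include "video/encoder_rtcp_feedback.h"

    void WireUpFeedback(webrtc::Clock* clock,
                        webrtc::VideoStreamEncoderInterface* encoder,
                        webrtc::RtpVideoSenderInterface* rtp_video_sender) {
      webrtc::EncoderRtcpFeedback feedback(
          clock, /*ssrcs=*/{0x1234}, encoder,
          [rtp_video_sender](uint32_t ssrc,
                             const std::vector<uint16_t>& seq_nums) {
            return rtp_video_sender->GetSentRtpPacketInfos(ssrc, seq_nums);
          });
      // The object is then registered with the RTP/RTCP module as both an
      // RtcpIntraFrameObserver and an RtcpLossNotificationObserver.
    }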
diff --git a/third_party/libwebrtc/video/encoder_rtcp_feedback_unittest.cc b/third_party/libwebrtc/video/encoder_rtcp_feedback_unittest.cc
new file mode 100644
index 0000000000..f1ac65d48f
--- /dev/null
+++ b/third_party/libwebrtc/video/encoder_rtcp_feedback_unittest.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_rtcp_feedback.h"
+
+#include <memory>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/test/mock_video_stream_encoder.h"
+
+using ::testing::_;
+
+namespace webrtc {
+
+class VieKeyRequestTest : public ::testing::Test {
+ public:
+ VieKeyRequestTest()
+ : simulated_clock_(123456789),
+ encoder_(),
+ encoder_rtcp_feedback_(
+ &simulated_clock_,
+ std::vector<uint32_t>(1, VieKeyRequestTest::kSsrc),
+ &encoder_,
+ nullptr) {}
+
+ protected:
+ const uint32_t kSsrc = 1234;
+
+ SimulatedClock simulated_clock_;
+ ::testing::StrictMock<MockVideoStreamEncoder> encoder_;
+ EncoderRtcpFeedback encoder_rtcp_feedback_;
+};
+
+TEST_F(VieKeyRequestTest, CreateAndTriggerRequests) {
+ EXPECT_CALL(encoder_, SendKeyFrame(_)).Times(1);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+}
+
+TEST_F(VieKeyRequestTest, TooManyOnReceivedIntraFrameRequest) {
+ EXPECT_CALL(encoder_, SendKeyFrame(_)).Times(1);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ simulated_clock_.AdvanceTimeMilliseconds(10);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+
+ EXPECT_CALL(encoder_, SendKeyFrame(_)).Times(1);
+ simulated_clock_.AdvanceTimeMilliseconds(300);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/bandwidth_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/bandwidth_tests.cc
new file mode 100644
index 0000000000..d6610a8ec2
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/bandwidth_tests.cc
@@ -0,0 +1,402 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/simulated_network.h"
+#include "api/units/time_delta.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/fake_encoder.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/rtp_rtcp_observer.h"
+#include "test/video_encoder_proxy_factory.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kAbsSendTimeExtensionId = 1,
+ kTransportSequenceNumberId,
+};
+} // namespace
+
+class BandwidthEndToEndTest : public test::CallTest {
+ public:
+ BandwidthEndToEndTest() = default;
+};
+
+TEST_F(BandwidthEndToEndTest, ReceiveStreamSendsRemb) {
+ class RembObserver : public test::EndToEndTest {
+ public:
+ RembObserver() : EndToEndTest(kDefaultTimeout) {}
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ if (parser.remb()->num_packets() > 0) {
+ EXPECT_EQ(kReceiverLocalVideoSsrc, parser.remb()->sender_ssrc());
+ EXPECT_LT(0U, parser.remb()->bitrate_bps());
+ EXPECT_EQ(1U, parser.remb()->ssrcs().size());
+ EXPECT_EQ(kVideoSendSsrcs[0], parser.remb()->ssrcs()[0]);
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for a "
+ "receiver RTCP REMB packet to be "
+ "sent.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+class BandwidthStatsTest : public test::EndToEndTest {
+ public:
+ BandwidthStatsTest(bool send_side_bwe, TaskQueueBase* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ sender_call_(nullptr),
+ receiver_call_(nullptr),
+ has_seen_pacer_delay_(false),
+ send_side_bwe_(send_side_bwe),
+ task_queue_(task_queue) {}
+
+ ~BandwidthStatsTest() override {
+    // Block until all already posted tasks run, to avoid races when such a
+    // task accesses `this`.
+ SendTask(task_queue_, [] {});
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ if (!send_side_bwe_) {
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ } else {
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberId));
+ }
+
+    // Force an excessively high encoder bitrate to make sure we get pacer
+    // delay.
+ encoder_config->number_of_streams = 1;
+ encoder_config->max_bitrate_bps = kMaxBitrateBps * 2;
+ encoder_config->simulcast_layers[0].min_bitrate_bps = kMaxBitrateBps * 2;
+ encoder_config->simulcast_layers[0].target_bitrate_bps = kMaxBitrateBps * 2;
+ encoder_config->simulcast_layers[0].max_bitrate_bps = kMaxBitrateBps * 2;
+ }
+
+ void ModifySenderBitrateConfig(BitrateConstraints* bitrate_config) override {
+ bitrate_config->max_bitrate_bps = kMaxBitrateBps;
+ }
+
+ // Called on the pacer thread.
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+    // Stats need to be fetched on the thread where the Call objects were
+    // constructed.
+ task_queue_->PostTask([this]() {
+ if (!sender_call_ || !receiver_call_) {
+ return;
+ }
+
+ Call::Stats sender_stats = sender_call_->GetStats();
+ if (!has_seen_pacer_delay_) {
+ has_seen_pacer_delay_ = sender_stats.pacer_delay_ms > 0;
+ }
+
+ if (sender_stats.send_bandwidth_bps > 0 && has_seen_pacer_delay_) {
+ Call::Stats receiver_stats = receiver_call_->GetStats();
+ if (send_side_bwe_ || receiver_stats.recv_bandwidth_bps > 0) {
+ observation_complete_.Set();
+ }
+ }
+ });
+
+ return SEND_PACKET;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ receiver_call_ = receiver_call;
+ }
+
+ void OnStreamsStopped() override {
+ sender_call_ = nullptr;
+ receiver_call_ = nullptr;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for "
+ "non-zero bandwidth stats.";
+ }
+
+ private:
+ static const int kMaxBitrateBps = 3000000;
+ Call* sender_call_;
+ Call* receiver_call_;
+ bool has_seen_pacer_delay_;
+ const bool send_side_bwe_;
+ TaskQueueBase* const task_queue_;
+};
+
+TEST_F(BandwidthEndToEndTest, VerifySendSideBweStats) {
+ BandwidthStatsTest test(true, task_queue());
+ RunBaseTest(&test);
+}
+
+TEST_F(BandwidthEndToEndTest, VerifyRecvSideBweStats) {
+ BandwidthStatsTest test(false, task_queue());
+ RunBaseTest(&test);
+}
+
+// Verifies that it's possible to limit the send BWE by sending a REMB.
+// This is verified by allowing the send BWE to ramp up to >1000 kbps, then
+// having the test generate a REMB of 500 kbps and verifying that the send
+// BWE drops to exactly 500 kbps. A REMB of 1000 kbps is then generated, and
+// the test verifies that the send BWE ramps back up to exactly 1000 kbps.
+TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) {
+ class BweObserver : public test::EndToEndTest {
+ public:
+ explicit BweObserver(TaskQueueBase* task_queue)
+ : EndToEndTest(kDefaultTimeout),
+ sender_call_(nullptr),
+ clock_(Clock::GetRealTimeClock()),
+ sender_ssrc_(0),
+ remb_bitrate_bps_(1000000),
+ state_(kWaitForFirstRampUp),
+ retransmission_rate_limiter_(clock_, 1000),
+ task_queue_(task_queue) {}
+
+ void OnStreamsStopped() override { rtp_rtcp_ = nullptr; }
+
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ // Set a high start bitrate to reduce the test completion time.
+ bitrate_config->start_bitrate_bps = remb_bitrate_bps_;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ ASSERT_EQ(1u, send_config->rtp.ssrcs.size());
+ sender_ssrc_ = send_config->rtp.ssrcs[0];
+
+ encoder_config->max_bitrate_bps = 2000000;
+
+ ASSERT_EQ(1u, receive_configs->size());
+ remb_sender_local_ssrc_ = (*receive_configs)[0].rtp.local_ssrc;
+ remb_sender_remote_ssrc_ = (*receive_configs)[0].rtp.remote_ssrc;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ RTC_DCHECK(sender_call);
+ sender_call_ = sender_call;
+ task_queue_->PostTask([this]() { PollStats(); });
+ }
+
+ void OnTransportCreated(
+ test::PacketTransport* /*to_receiver*/,
+ SimulatedNetworkInterface* /*sender_network*/,
+ test::PacketTransport* to_sender,
+ SimulatedNetworkInterface* /*receiver_network*/) override {
+ RtpRtcpInterface::Configuration config;
+ config.receiver_only = true;
+ config.clock = clock_;
+ config.outgoing_transport = to_sender;
+ config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ config.local_media_ssrc = remb_sender_local_ssrc_;
+
+ rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(config);
+ rtp_rtcp_->SetRemoteSSRC(remb_sender_remote_ssrc_);
+ rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
+ }
+
+ void PollStats() {
+ Call::Stats stats = sender_call_->GetStats();
+ switch (state_) {
+ case kWaitForFirstRampUp:
+ if (stats.send_bandwidth_bps >= remb_bitrate_bps_) {
+ state_ = kWaitForRemb;
+ remb_bitrate_bps_ /= 2;
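+            // Request the lower rate via REMB for the sender's SSRC only.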
+ rtp_rtcp_->SetRemb(
+ remb_bitrate_bps_,
+ std::vector<uint32_t>(&sender_ssrc_, &sender_ssrc_ + 1));
+ rtp_rtcp_->SendRTCP(kRtcpRr);
+ }
+ break;
+
+ case kWaitForRemb:
+ if (stats.send_bandwidth_bps == remb_bitrate_bps_) {
+ state_ = kWaitForSecondRampUp;
+ remb_bitrate_bps_ *= 2;
+ rtp_rtcp_->SetRemb(
+ remb_bitrate_bps_,
+ std::vector<uint32_t>(&sender_ssrc_, &sender_ssrc_ + 1));
+ rtp_rtcp_->SendRTCP(kRtcpRr);
+ }
+ break;
+
+ case kWaitForSecondRampUp:
+ if (stats.send_bandwidth_bps == remb_bitrate_bps_) {
+ observation_complete_.Set();
+ return;
+ }
+ break;
+ }
+
+ task_queue_->PostDelayedTask([this] { PollStats(); },
+ TimeDelta::Seconds(1));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for bitrate to change according to REMB.";
+ }
+
+ private:
+ enum TestState { kWaitForFirstRampUp, kWaitForRemb, kWaitForSecondRampUp };
+
+ Call* sender_call_;
+ Clock* const clock_;
+ uint32_t sender_ssrc_;
+ uint32_t remb_sender_local_ssrc_ = 0;
+ uint32_t remb_sender_remote_ssrc_ = 0;
+ int remb_bitrate_bps_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+ TestState state_;
+ RateLimiter retransmission_rate_limiter_;
+ TaskQueueBase* const task_queue_;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(BandwidthEndToEndTest, ReportsSetEncoderRates) {
+  // If this field trial is on, we get lower bitrates than this test expects,
+  // due to packetization overhead and encoder pushback.
+ webrtc::test::ScopedFieldTrials field_trials(
+ std::string(field_trial::GetFieldTrialString()) +
+ "WebRTC-VideoRateControl/bitrate_adjuster:false/");
+ class EncoderRateStatsTest : public test::EndToEndTest,
+ public test::FakeEncoder {
+ public:
+ explicit EncoderRateStatsTest(TaskQueueBase* task_queue)
+ : EndToEndTest(kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ send_stream_(nullptr),
+ encoder_factory_(this),
+ bitrate_allocator_factory_(
+ CreateBuiltinVideoBitrateAllocatorFactory()),
+ bitrate_kbps_(0) {}
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->encoder_settings.bitrate_allocator_factory =
+ bitrate_allocator_factory_.get();
+ RTC_DCHECK_EQ(1, encoder_config->number_of_streams);
+ }
+
+ void SetRates(const RateControlParameters& parameters) override {
+ // Make sure not to trigger on any default zero bitrates.
+ if (parameters.bitrate.get_sum_bps() == 0)
+ return;
+ MutexLock lock(&mutex_);
+ bitrate_kbps_ = parameters.bitrate.get_sum_kbps();
+ observation_complete_.Set();
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(Wait())
+ << "Timed out while waiting for encoder SetRates() call.";
+
+ SendTask(task_queue_, [this]() {
+ WaitForEncoderTargetBitrateMatchStats();
+ send_stream_->Stop();
+ WaitForStatsReportZeroTargetBitrate();
+ send_stream_->Start();
+ WaitForEncoderTargetBitrateMatchStats();
+ });
+ }
+
+ void WaitForEncoderTargetBitrateMatchStats() {
+ for (int i = 0; i < kDefaultTimeout.ms(); ++i) {
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+ {
+ MutexLock lock(&mutex_);
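+        // Compare after rounding the bps stat to the nearest kbps.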
+ if ((stats.target_media_bitrate_bps + 500) / 1000 ==
+ static_cast<int>(bitrate_kbps_)) {
+ return;
+ }
+ }
+ SleepMs(1);
+ }
+ FAIL()
+ << "Timed out waiting for stats reporting the currently set bitrate.";
+ }
+
+ void WaitForStatsReportZeroTargetBitrate() {
+ for (int i = 0; i < kDefaultTimeout.ms(); ++i) {
+ if (send_stream_->GetStats().target_media_bitrate_bps == 0) {
+ return;
+ }
+ SleepMs(1);
+ }
+ FAIL() << "Timed out waiting for stats reporting zero bitrate.";
+ }
+
+ private:
+ TaskQueueBase* const task_queue_;
+ Mutex mutex_;
+ VideoSendStream* send_stream_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory_;
+ uint32_t bitrate_kbps_ RTC_GUARDED_BY(mutex_);
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/call_operation_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/call_operation_tests.cc
new file mode 100644
index 0000000000..f5b32388b1
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/call_operation_tests.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/simulated_network.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/frame_forwarder.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+
+namespace webrtc {
+
+class CallOperationEndToEndTest : public test::CallTest {};
+
+TEST_F(CallOperationEndToEndTest, ReceiverCanBeStartedTwice) {
+ CreateCalls();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateMatchingReceiveConfigs(&transport);
+
+ CreateVideoStreams();
+
+ video_receive_streams_[0]->Start();
+ video_receive_streams_[0]->Start();
+
+ DestroyStreams();
+}
+
+TEST_F(CallOperationEndToEndTest, ReceiverCanBeStoppedTwice) {
+ CreateCalls();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateMatchingReceiveConfigs(&transport);
+
+ CreateVideoStreams();
+
+ video_receive_streams_[0]->Stop();
+ video_receive_streams_[0]->Stop();
+
+ DestroyStreams();
+}
+
+TEST_F(CallOperationEndToEndTest, ReceiverCanBeStoppedAndRestarted) {
+ CreateCalls();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateMatchingReceiveConfigs(&transport);
+
+ CreateVideoStreams();
+
+ video_receive_streams_[0]->Stop();
+ video_receive_streams_[0]->Start();
+ video_receive_streams_[0]->Stop();
+
+ DestroyStreams();
+}
+
+TEST_F(CallOperationEndToEndTest, RendersSingleDelayedFrame) {
+ static const int kWidth = 320;
+ static const int kHeight = 240;
+ // This constant is chosen to be higher than the timeout in the video_render
+ // module. This makes sure that frames aren't dropped if there are no other
+ // frames in the queue.
+ static const int kRenderDelayMs = 1000;
+
+ class Renderer : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ void OnFrame(const VideoFrame& video_frame) override {
+ SleepMs(kRenderDelayMs);
+ event_.Set();
+ }
+
+ bool Wait() { return event_.Wait(kDefaultTimeout); }
+
+ rtc::Event event_;
+ } renderer;
+
+ test::FrameForwarder frame_forwarder;
+
+ SendTask(
+ task_queue(), [this, &renderer, &frame_forwarder]() {
+ CreateCalls();
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(),
+ /*observer=*/nullptr);
+
+ CreateReceiveTransport(BuiltInNetworkBehaviorConfig(),
+ /*observer=*/nullptr);
+ CreateSendConfig(1, 0, 0);
+ CreateMatchingReceiveConfigs();
+
+ video_receive_configs_[0].renderer = &renderer;
+
+ CreateVideoStreams();
+ Start();
+
+          // Create frames that are smaller than the send width/height; this
+          // checks that the callbacks happen after the video has been
+          // processed.
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator(
+ test::CreateSquareFrameGenerator(kWidth, kHeight, absl::nullopt,
+ absl::nullopt));
+ GetVideoSendStream()->SetSource(
+ &frame_forwarder, DegradationPreference::MAINTAIN_FRAMERATE);
+
+ test::FrameGeneratorInterface::VideoFrameData frame_data =
+ frame_generator->NextFrame();
+ VideoFrame frame = VideoFrame::Builder()
+ .set_video_frame_buffer(frame_data.buffer)
+ .set_update_rect(frame_data.update_rect)
+ .build();
+ frame_forwarder.IncomingCapturedFrame(frame);
+ });
+
+ EXPECT_TRUE(renderer.Wait())
+ << "Timed out while waiting for the frame to render.";
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(CallOperationEndToEndTest, TransmitsFirstFrame) {
+ class Renderer : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ void OnFrame(const VideoFrame& video_frame) override { event_.Set(); }
+
+ bool Wait() { return event_.Wait(kDefaultTimeout); }
+
+ rtc::Event event_;
+ } renderer;
+
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator;
+ test::FrameForwarder frame_forwarder;
+
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ SendTask(
+ task_queue(), [this, &renderer, &frame_generator, &frame_forwarder]() {
+ CreateCalls();
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(),
+ /*observer=*/nullptr);
+ CreateReceiveTransport(BuiltInNetworkBehaviorConfig(),
+ /*observer=*/nullptr);
+
+ CreateSendConfig(1, 0, 0);
+ CreateMatchingReceiveConfigs();
+ video_receive_configs_[0].renderer = &renderer;
+
+ CreateVideoStreams();
+ Start();
+
+ frame_generator = test::CreateSquareFrameGenerator(
+ kDefaultWidth, kDefaultHeight, absl::nullopt, absl::nullopt);
+ GetVideoSendStream()->SetSource(
+ &frame_forwarder, DegradationPreference::MAINTAIN_FRAMERATE);
+ test::FrameGeneratorInterface::VideoFrameData frame_data =
+ frame_generator->NextFrame();
+ VideoFrame frame = VideoFrame::Builder()
+ .set_video_frame_buffer(frame_data.buffer)
+ .set_update_rect(frame_data.update_rect)
+ .build();
+ frame_forwarder.IncomingCapturedFrame(frame);
+ });
+
+ EXPECT_TRUE(renderer.Wait())
+ << "Timed out while waiting for the frame to render.";
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/codec_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/codec_tests.cc
new file mode 100644
index 0000000000..53ec9f5b17
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/codec_tests.cc
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video/color_space.h"
+#include "api/video/video_rotation.h"
+#include "common_video/test/utilities.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "test/call_test.h"
+#include "test/encoder_settings.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kColorSpaceExtensionId = 1,
+ kVideoRotationExtensionId,
+};
+} // namespace
+
+class CodecEndToEndTest : public test::CallTest {
+ public:
+ CodecEndToEndTest() {
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kColorSpaceUri, kColorSpaceExtensionId));
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoRotationUri,
+ kVideoRotationExtensionId));
+ }
+};
+
+class CodecObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ CodecObserver(int no_frames_to_wait_for,
+ VideoRotation rotation_to_test,
+ absl::optional<ColorSpace> color_space_to_test,
+ const std::string& payload_name,
+ VideoEncoderFactory* encoder_factory,
+ VideoDecoderFactory* decoder_factory)
+ : EndToEndTest(4 * CodecEndToEndTest::kDefaultTimeout),
+ // TODO(hta): This timeout (120 seconds) is excessive.
+ // https://bugs.webrtc.org/6830
+ no_frames_to_wait_for_(no_frames_to_wait_for),
+ expected_rotation_(rotation_to_test),
+ expected_color_space_(color_space_to_test),
+ payload_name_(payload_name),
+ encoder_factory_(encoder_factory),
+ decoder_factory_(decoder_factory),
+ frame_counter_(0) {}
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for enough frames to be decoded.";
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->codec_type = PayloadStringToCodecType(payload_name_);
+ send_config->encoder_settings.encoder_factory = encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType;
+
+ (*receive_configs)[0].renderer = this;
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->rtp.payload_type;
+ (*receive_configs)[0].decoders[0].video_format =
+ SdpVideoFormat(send_config->rtp.payload_name);
+ (*receive_configs)[0].decoder_factory = decoder_factory_;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ EXPECT_EQ(expected_rotation_, video_frame.rotation());
+    // Only test if an explicit color space has been specified, since
+    // otherwise the color space is codec dependent.
+ if (expected_color_space_) {
+ EXPECT_EQ(expected_color_space_,
+ video_frame.color_space()
+ ? absl::make_optional(*video_frame.color_space())
+ : absl::nullopt);
+ }
+ if (++frame_counter_ == no_frames_to_wait_for_)
+ observation_complete_.Set();
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(expected_rotation_);
+ frame_generator_capturer->SetFakeColorSpace(expected_color_space_);
+ }
+
+ private:
+ int no_frames_to_wait_for_;
+ VideoRotation expected_rotation_;
+ absl::optional<ColorSpace> expected_color_space_;
+ std::string payload_name_;
+ VideoEncoderFactory* encoder_factory_;
+ VideoDecoderFactory* decoder_factory_;
+ int frame_counter_;
+};
+
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return VP8Decoder::Create(); });
+ CodecObserver test(5, kVideoRotation_0, absl::nullopt, "VP8",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return VP8Decoder::Create(); });
+ CodecObserver test(5, kVideoRotation_90, absl::nullopt, "VP8",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+#if defined(RTC_ENABLE_VP9)
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP9) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return VP9Decoder::Create(); });
+ CodecObserver test(500, kVideoRotation_0, absl::nullopt, "VP9",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return VP9Decoder::Create(); });
+ CodecObserver test(5, kVideoRotation_90, absl::nullopt, "VP9",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return VP9Decoder::Create(); });
+ CodecObserver test(5, kVideoRotation_90,
+ CreateTestColorSpace(/*with_hdr_metadata=*/false), "VP9",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(CodecEndToEndTest,
+ SendsAndReceivesVP9ExplicitColorSpaceWithHdrMetadata) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return VP9Decoder::Create(); });
+ CodecObserver test(5, kVideoRotation_90,
+ CreateTestColorSpace(/*with_hdr_metadata=*/true), "VP9",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+// Multiplex tests use VP9 as the underlying implementation.
+TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplex) {
+ InternalEncoderFactory internal_encoder_factory;
+ InternalDecoderFactory internal_decoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<MultiplexEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat(cricket::kVp9CodecName));
+ });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ [&internal_decoder_factory]() {
+ return std::make_unique<MultiplexDecoderAdapter>(
+ &internal_decoder_factory, SdpVideoFormat(cricket::kVp9CodecName));
+ });
+
+ CodecObserver test(5, kVideoRotation_0, absl::nullopt, "multiplex",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplexVideoRotation90) {
+ InternalEncoderFactory internal_encoder_factory;
+ InternalDecoderFactory internal_decoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<MultiplexEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat(cricket::kVp9CodecName));
+ });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ [&internal_decoder_factory]() {
+ return std::make_unique<MultiplexDecoderAdapter>(
+ &internal_decoder_factory, SdpVideoFormat(cricket::kVp9CodecName));
+ });
+ CodecObserver test(5, kVideoRotation_90, absl::nullopt, "multiplex",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+#endif // defined(RTC_ENABLE_VP9)
+
+#if defined(WEBRTC_USE_H264)
+class EndToEndTestH264 : public test::CallTest,
+ public ::testing::WithParamInterface<std::string> {
+ public:
+ EndToEndTestH264() : field_trial_(GetParam()) {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoRotationUri,
+ kVideoRotationExtensionId));
+ }
+
+ private:
+ test::ScopedFieldTrials field_trial_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ SpsPpsIdrIsKeyframe,
+ EndToEndTestH264,
+ ::testing::Values("WebRTC-SpsPpsIdrIsH264Keyframe/Disabled/",
+ "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return H264Encoder::Create(cricket::VideoCodec("H264")); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return H264Decoder::Create(); });
+ CodecObserver test(500, kVideoRotation_0, absl::nullopt, "H264",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264VideoRotation90) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return H264Encoder::Create(cricket::VideoCodec("H264")); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return H264Decoder::Create(); });
+ CodecObserver test(5, kVideoRotation_90, absl::nullopt, "H264",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264PacketizationMode0) {
+ cricket::VideoCodec codec = cricket::VideoCodec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "0");
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [codec]() { return H264Encoder::Create(codec); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return H264Decoder::Create(); });
+ CodecObserver test(500, kVideoRotation_0, absl::nullopt, "H264",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264PacketizationMode1) {
+ cricket::VideoCodec codec = cricket::VideoCodec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "1");
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [codec]() { return H264Encoder::Create(codec); });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ []() { return H264Decoder::Create(); });
+ CodecObserver test(500, kVideoRotation_0, absl::nullopt, "H264",
+ &encoder_factory, &decoder_factory);
+ RunBaseTest(&test);
+}
+#endif // defined(WEBRTC_USE_H264)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/config_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/config_tests.cc
new file mode 100644
index 0000000000..7e27448991
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/config_tests.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <map>
+#include <vector>
+
+#include "api/crypto/crypto_options.h"
+#include "api/rtp_headers.h"
+#include "call/flexfec_receive_stream.h"
+#include "call/rtp_config.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "test/call_test.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+
+namespace webrtc {
+
+class ConfigEndToEndTest : public test::CallTest {};
+
+namespace {
+void VerifyEmptyNackConfig(const NackConfig& config) {
+ EXPECT_EQ(0, config.rtp_history_ms)
+ << "Enabling NACK requires rtcp-fb: nack negotiation.";
+}
+
+void VerifyEmptyUlpfecConfig(const UlpfecConfig& config) {
+ EXPECT_EQ(-1, config.ulpfec_payload_type)
+ << "Enabling ULPFEC requires rtpmap: ulpfec negotiation.";
+ EXPECT_EQ(-1, config.red_payload_type)
+ << "Enabling ULPFEC requires rtpmap: red negotiation.";
+ EXPECT_EQ(-1, config.red_rtx_payload_type)
+ << "Enabling RTX in ULPFEC requires rtpmap: rtx negotiation.";
+}
+
+void VerifyEmptyFlexfecConfig(const RtpConfig::Flexfec& config) {
+ EXPECT_EQ(-1, config.payload_type)
+ << "Enabling FlexFEC requires rtpmap: flexfec negotiation.";
+ EXPECT_EQ(0U, config.ssrc)
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+ EXPECT_TRUE(config.protected_media_ssrcs.empty())
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+}
+} // namespace
+
+TEST_F(ConfigEndToEndTest, VerifyDefaultSendConfigParameters) {
+ VideoSendStream::Config default_send_config(nullptr);
+  EXPECT_FALSE(default_send_config.rtp.lntf.enabled)
+      << "Enabling LNTF requires rtcp-fb: goog-lntf negotiation.";
+  EXPECT_EQ(0, default_send_config.rtp.nack.rtp_history_ms)
+      << "Enabling NACK requires rtcp-fb: nack negotiation.";
+  EXPECT_TRUE(default_send_config.rtp.rtx.ssrcs.empty())
+      << "Enabling RTX requires rtpmap: rtx negotiation.";
+  EXPECT_TRUE(default_send_config.rtp.extensions.empty())
+      << "Enabling RTP extensions requires negotiation.";
+ EXPECT_EQ(nullptr, default_send_config.frame_encryptor)
+ << "Enabling Frame Encryption requires a frame encryptor to be attached";
+ EXPECT_FALSE(
+ default_send_config.crypto_options.sframe.require_frame_encryption)
+ << "Enabling Require Frame Encryption means an encryptor must be "
+ "attached";
+
+ VerifyEmptyNackConfig(default_send_config.rtp.nack);
+ VerifyEmptyUlpfecConfig(default_send_config.rtp.ulpfec);
+ VerifyEmptyFlexfecConfig(default_send_config.rtp.flexfec);
+}
+
+TEST_F(ConfigEndToEndTest, VerifyDefaultVideoReceiveConfigParameters) {
+ VideoReceiveStreamInterface::Config default_receive_config(nullptr);
+  EXPECT_EQ(RtcpMode::kCompound, default_receive_config.rtp.rtcp_mode)
+      << "Reduced-size RTCP requires rtcp-rsize to be negotiated.";
+  EXPECT_FALSE(default_receive_config.rtp.lntf.enabled)
+      << "Enabling LNTF requires rtcp-fb: goog-lntf negotiation.";
+ EXPECT_FALSE(
+ default_receive_config.rtp.rtcp_xr.receiver_reference_time_report)
+ << "RTCP XR settings require rtcp-xr to be negotiated.";
+  EXPECT_EQ(0U, default_receive_config.rtp.rtx_ssrc)
+      << "Enabling RTX requires ssrc-group: FID negotiation.";
+  EXPECT_TRUE(default_receive_config.rtp.rtx_associated_payload_types.empty())
+      << "Enabling RTX requires rtpmap: rtx negotiation.";
+  EXPECT_TRUE(default_receive_config.rtp.extensions.empty())
+      << "Enabling RTP extensions requires negotiation.";
+ VerifyEmptyNackConfig(default_receive_config.rtp.nack);
+ EXPECT_EQ(-1, default_receive_config.rtp.ulpfec_payload_type)
+ << "Enabling ULPFEC requires rtpmap: ulpfec negotiation.";
+ EXPECT_EQ(-1, default_receive_config.rtp.red_payload_type)
+ << "Enabling ULPFEC requires rtpmap: red negotiation.";
+ EXPECT_EQ(nullptr, default_receive_config.frame_decryptor)
+ << "Enabling Frame Decryption requires a frame decryptor to be attached";
+ EXPECT_FALSE(
+ default_receive_config.crypto_options.sframe.require_frame_encryption)
+ << "Enabling Require Frame Encryption means a decryptor must be attached";
+}
+
+TEST_F(ConfigEndToEndTest, VerifyDefaultFlexfecReceiveConfigParameters) {
+ test::NullTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config default_receive_config(&rtcp_send_transport);
+ EXPECT_EQ(-1, default_receive_config.payload_type)
+ << "Enabling FlexFEC requires rtpmap: flexfec negotiation.";
+ EXPECT_EQ(0U, default_receive_config.rtp.remote_ssrc)
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+ EXPECT_TRUE(default_receive_config.protected_media_ssrcs.empty())
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/extended_reports_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/extended_reports_tests.cc
new file mode 100644
index 0000000000..2897212e0b
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/extended_reports_tests.cc
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/simulated_network.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "call/call.h"
+#include "call/fake_network_pipe.h"
+#include "call/rtp_config.h"
+#include "call/simulated_network.h"
+#include "call/simulated_packet_receiver.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/rtp_rtcp_observer.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kColorSpaceExtensionId = 1,
+ kTransportSequenceNumberExtensionId,
+};
+} // namespace
+
+class ExtendedReportsEndToEndTest : public test::CallTest {
+ public:
+ ExtendedReportsEndToEndTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ }
+};
+
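+// Background (RFC 3611): the receiver advertises its local NTP clock in an
+// XR RRTR block, and the sender answers with an XR DLRR block echoing the
+// last RRTR timestamp plus the delay since it arrived, which lets the
+// receiver estimate round-trip time without sending media itself.
+// RtcpXrObserver below verifies exactly this exchange.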
+class RtcpXrObserver : public test::EndToEndTest {
+ public:
+ RtcpXrObserver(bool enable_rrtr,
+ bool expect_target_bitrate,
+ bool enable_zero_target_bitrate,
+ VideoEncoderConfig::ContentType content_type)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ enable_rrtr_(enable_rrtr),
+ expect_target_bitrate_(expect_target_bitrate),
+ enable_zero_target_bitrate_(enable_zero_target_bitrate),
+ content_type_(content_type),
+ sent_rtcp_sr_(0),
+ sent_rtcp_rr_(0),
+ sent_rtcp_rrtr_(0),
+ sent_rtcp_target_bitrate_(false),
+ sent_zero_rtcp_target_bitrate_(false),
+ sent_rtcp_dlrr_(0),
+ send_simulated_network_(nullptr) {
+ forward_transport_config_.link_capacity_kbps = 500;
+ forward_transport_config_.queue_delay_ms = 0;
+ forward_transport_config_.loss_percent = 0;
+ }
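+
+ // How the zero-target-bitrate case is driven: the link starts at 500 kbps
+ // so both simulcast streams ramp up; once RTCP from the second stream is
+ // seen, OnSendRtcp squeezes the link to 200 kbps. The allocator should
+ // then give that stream no rate, so the RTCP XR TargetBitrate block either
+ // omits it or lists it at 0 kbps, which is what
+ // sent_zero_rtcp_target_bitrate_ records.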
+
+ private:
+ // Receive stream should send RR packets (and RRTR packets if enabled).
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ sent_rtcp_rr_ += parser.receiver_report()->num_packets();
+ EXPECT_EQ(0, parser.sender_report()->num_packets());
+ EXPECT_LE(parser.xr()->num_packets(), 1);
+ if (parser.xr()->num_packets() > 0) {
+ if (parser.xr()->rrtr())
+ ++sent_rtcp_rrtr_;
+ EXPECT_FALSE(parser.xr()->dlrr());
+ }
+
+ return SEND_PACKET;
+ }
+ // Send stream should send SR packets (and DLRR packets if enabled).
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ if (parser.sender_ssrc() == test::CallTest::kVideoSendSsrcs[1] &&
+ enable_zero_target_bitrate_) {
+ // Tighten the bandwidth restriction to disable the second stream after
+ // it has been enabled for a while.
+ forward_transport_config_.link_capacity_kbps = 200;
+ send_simulated_network_->SetConfig(forward_transport_config_);
+ }
+
+ sent_rtcp_sr_ += parser.sender_report()->num_packets();
+ EXPECT_LE(parser.xr()->num_packets(), 1);
+ if (parser.xr()->num_packets() > 0) {
+ EXPECT_FALSE(parser.xr()->rrtr());
+ if (parser.xr()->dlrr())
+ ++sent_rtcp_dlrr_;
+ if (parser.xr()->target_bitrate()) {
+ sent_rtcp_target_bitrate_ = true;
+ auto target_bitrates =
+ parser.xr()->target_bitrate()->GetTargetBitrates();
+ if (target_bitrates.empty()) {
+ sent_zero_rtcp_target_bitrate_ = true;
+ }
+ for (const rtcp::TargetBitrate::BitrateItem& item : target_bitrates) {
+ if (item.target_bitrate_kbps == 0) {
+ sent_zero_rtcp_target_bitrate_ = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (sent_rtcp_sr_ > kNumRtcpReportPacketsToObserve &&
+ sent_rtcp_rr_ > kNumRtcpReportPacketsToObserve &&
+ (sent_rtcp_target_bitrate_ || !expect_target_bitrate_) &&
+ (sent_zero_rtcp_target_bitrate_ || !enable_zero_target_bitrate_)) {
+ if (enable_rrtr_) {
+ EXPECT_GT(sent_rtcp_rrtr_, 0);
+ EXPECT_GT(sent_rtcp_dlrr_, 0);
+ } else {
+ EXPECT_EQ(sent_rtcp_rrtr_, 0);
+ EXPECT_EQ(sent_rtcp_dlrr_, 0);
+ }
+ EXPECT_EQ(expect_target_bitrate_, sent_rtcp_target_bitrate_);
+ EXPECT_EQ(enable_zero_target_bitrate_, sent_zero_rtcp_target_bitrate_);
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override {
+ // When sending a zero target bitrate, we use two simulcast streams so
+ // that one stream still has a non-zero target bitrate.
+ return enable_zero_target_bitrate_ ? 2 : 1;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ return forward_transport_config_;
+ }
+
+ void OnTransportCreated(
+ test::PacketTransport* to_receiver,
+ SimulatedNetworkInterface* sender_network,
+ test::PacketTransport* to_sender,
+ SimulatedNetworkInterface* receiver_network) override {
+ send_simulated_network_ = sender_network;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (enable_zero_target_bitrate_) {
+ // Configure VP8 to be able to use simulcast.
+ send_config->rtp.payload_name = "VP8";
+ encoder_config->codec_type = kVideoCodecVP8;
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->rtp.payload_type;
+ (*receive_configs)[0].decoders[0].video_format =
+ SdpVideoFormat(send_config->rtp.payload_name);
+ }
+ encoder_config->content_type = content_type_;
+ (*receive_configs)[0].rtp.rtcp_mode = RtcpMode::kReducedSize;
+ (*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report =
+ enable_rrtr_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for RTCP SR/RR packets to be sent.";
+ }
+
+ static const int kNumRtcpReportPacketsToObserve = 5;
+
+ Mutex mutex_;
+ const bool enable_rrtr_;
+ const bool expect_target_bitrate_;
+ const bool enable_zero_target_bitrate_;
+ const VideoEncoderConfig::ContentType content_type_;
+ int sent_rtcp_sr_ RTC_GUARDED_BY(&mutex_);
+ int sent_rtcp_rr_ RTC_GUARDED_BY(&mutex_);
+ int sent_rtcp_rrtr_ RTC_GUARDED_BY(&mutex_);
+ bool sent_rtcp_target_bitrate_ RTC_GUARDED_BY(&mutex_);
+ bool sent_zero_rtcp_target_bitrate_ RTC_GUARDED_BY(&mutex_);
+ int sent_rtcp_dlrr_ RTC_GUARDED_BY(&mutex_);
+ BuiltInNetworkBehaviorConfig forward_transport_config_;
+ SimulatedNetworkInterface* send_simulated_network_ = nullptr;
+};
+
+TEST_F(ExtendedReportsEndToEndTest,
+ TestExtendedReportsWithRrtrWithoutTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/true, /*expect_target_bitrate=*/false,
+ /*enable_zero_target_bitrate=*/false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+ RunBaseTest(&test);
+}
+
+TEST_F(ExtendedReportsEndToEndTest,
+ TestExtendedReportsWithoutRrtrWithoutTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*expect_target_bitrate=*/false,
+ /*enable_zero_target_bitrate=*/false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+ RunBaseTest(&test);
+}
+
+TEST_F(ExtendedReportsEndToEndTest,
+ TestExtendedReportsWithRrtrWithTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/true, /*expect_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/false,
+ VideoEncoderConfig::ContentType::kScreen);
+ RunBaseTest(&test);
+}
+
+TEST_F(ExtendedReportsEndToEndTest,
+ TestExtendedReportsWithoutRrtrWithTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*expect_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/false,
+ VideoEncoderConfig::ContentType::kScreen);
+ RunBaseTest(&test);
+}
+
+TEST_F(ExtendedReportsEndToEndTest,
+ TestExtendedReportsWithoutRrtrWithTargetBitrateExplicitlySet) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Target-Bitrate-Rtcp/Enabled/");
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*expect_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+ RunBaseTest(&test);
+}
+
+TEST_F(ExtendedReportsEndToEndTest,
+ TestExtendedReportsCanSignalZeroTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*expect_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/true,
+ VideoEncoderConfig::ContentType::kScreen);
+ RunBaseTest(&test);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/fec_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/fec_tests.cc
new file mode 100644
index 0000000000..bf3ad0b22d
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/fec_tests.cc
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+using ::testing::Contains;
+using ::testing::Not;
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kTransportSequenceNumberExtensionId = 1,
+ kVideoRotationExtensionId,
+};
+} // namespace
+
+class FecEndToEndTest : public test::CallTest {
+ public:
+ FecEndToEndTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoRotationUri,
+ kVideoRotationExtensionId));
+ }
+};
+
+TEST_F(FecEndToEndTest, ReceivesUlpfec) {
+ class UlpfecRenderObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ UlpfecRenderObserver()
+ : EndToEndTest(kDefaultTimeout),
+ encoder_factory_([]() { return VP8Encoder::Create(); }),
+ random_(0xcafef00d1),
+ num_packets_sent_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_TRUE(rtp_packet.PayloadType() == kVideoSendPayloadType ||
+ rtp_packet.PayloadType() == kRedPayloadType)
+ << "Unknown payload type received.";
+ EXPECT_EQ(kVideoSendSsrcs[0], rtp_packet.Ssrc())
+ << "Unknown SSRC received.";
+
+ // Parse RED header.
+ int encapsulated_payload_type = -1;
+ if (rtp_packet.PayloadType() == kRedPayloadType) {
+ encapsulated_payload_type = rtp_packet.payload()[0];
+
+ EXPECT_TRUE(encapsulated_payload_type == kVideoSendPayloadType ||
+ encapsulated_payload_type == kUlpfecPayloadType)
+ << "Unknown encapsulated payload type received.";
+ }
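+
+ // RED (RFC 2198) note: WebRTC sends one encoded block per RED packet, so
+ // the RED header is a single octet whose low seven bits carry the
+ // encapsulated payload type (the F bit is zero); payload()[0] above
+ // reads exactly that octet.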
+
+ // To minimize test flakiness, always let ULPFEC packets through.
+ if (encapsulated_payload_type == kUlpfecPayloadType) {
+ return SEND_PACKET;
+ }
+
+ // Simulate 5% video packet loss after rampup period. Record the
+ // corresponding timestamps that were dropped.
+ if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
+ if (encapsulated_payload_type == kVideoSendPayloadType) {
+ dropped_sequence_numbers_.insert(rtp_packet.SequenceNumber());
+ dropped_timestamps_.insert(rtp_packet.Timestamp());
+ }
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ MutexLock lock(&mutex_);
+ // Rendering frame with timestamp of packet that was dropped -> FEC
+ // protection worked.
+ auto it = dropped_timestamps_.find(video_frame.timestamp());
+ if (it != dropped_timestamps_.end()) {
+ observation_complete_.Set();
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Use VP8 instead of FAKE, since the latter does not have PictureID
+ // in the packetization headers.
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP8";
+ send_config->rtp.payload_type = kVideoSendPayloadType;
+ encoder_config->codec_type = kVideoCodecVP8;
+ VideoReceiveStreamInterface::Decoder decoder =
+ test::CreateMatchingDecoder(*send_config);
+ (*receive_configs)[0].decoder_factory = &decoder_factory_;
+ (*receive_configs)[0].decoders.clear();
+ (*receive_configs)[0].decoders.push_back(decoder);
+
+ // Enable ULPFEC over RED.
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ (*receive_configs)[0].rtp.red_payload_type = kRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type = kUlpfecPayloadType;
+
+ (*receive_configs)[0].renderer = this;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for dropped frames to be rendered.";
+ }
+
+ Mutex mutex_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ InternalDecoderFactory decoder_factory_;
+ std::set<uint32_t> dropped_sequence_numbers_ RTC_GUARDED_BY(mutex_);
+ // Several packets can have the same timestamp.
+ std::multiset<uint32_t> dropped_timestamps_ RTC_GUARDED_BY(mutex_);
+ Random random_;
+ int num_packets_sent_ RTC_GUARDED_BY(mutex_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+class FlexfecRenderObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ static constexpr uint32_t kVideoLocalSsrc = 123;
+ static constexpr uint32_t kFlexfecLocalSsrc = 456;
+
+ explicit FlexfecRenderObserver(bool enable_nack, bool expect_flexfec_rtcp)
+ : test::EndToEndTest(test::CallTest::kLongTimeout),
+ enable_nack_(enable_nack),
+ expect_flexfec_rtcp_(expect_flexfec_rtcp),
+ received_flexfec_rtcp_(false),
+ random_(0xcafef00d1),
+ num_packets_sent_(0) {}
+
+ size_t GetNumFlexfecStreams() const override { return 1; }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_TRUE(
+ rtp_packet.PayloadType() == test::CallTest::kFakeVideoSendPayloadType ||
+ rtp_packet.PayloadType() == test::CallTest::kFlexfecPayloadType ||
+ (enable_nack_ &&
+ rtp_packet.PayloadType() == test::CallTest::kSendRtxPayloadType))
+ << "Unknown payload type received.";
+ EXPECT_TRUE(
+ rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[0] ||
+ rtp_packet.Ssrc() == test::CallTest::kFlexfecSendSsrc ||
+ (enable_nack_ && rtp_packet.Ssrc() == test::CallTest::kSendRtxSsrcs[0]))
+ << "Unknown SSRC received.";
+
+ // To reduce test flakiness, always let FlexFEC packets through.
+ if (rtp_packet.PayloadType() == test::CallTest::kFlexfecPayloadType) {
+ EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, rtp_packet.Ssrc());
+
+ return SEND_PACKET;
+ }
+
+ // To reduce test flakiness, always let RTX packets through.
+ if (rtp_packet.PayloadType() == test::CallTest::kSendRtxPayloadType) {
+ EXPECT_EQ(test::CallTest::kSendRtxSsrcs[0], rtp_packet.Ssrc());
+
+ if (rtp_packet.payload_size() == 0) {
+ // Pure padding packet.
+ return SEND_PACKET;
+ }
+
+ // Parse RTX header.
+ uint16_t original_sequence_number =
+ ByteReader<uint16_t>::ReadBigEndian(rtp_packet.payload().data());
+
+ // From the perspective of FEC, a retransmitted packet is no longer
+ // dropped, so remove it from the list of dropped packets.
+ auto seq_num_it =
+ dropped_sequence_numbers_.find(original_sequence_number);
+ if (seq_num_it != dropped_sequence_numbers_.end()) {
+ dropped_sequence_numbers_.erase(seq_num_it);
+ auto ts_it = dropped_timestamps_.find(rtp_packet.Timestamp());
+ EXPECT_NE(ts_it, dropped_timestamps_.end());
+ dropped_timestamps_.erase(ts_it);
+ }
+
+ return SEND_PACKET;
+ }
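+
+ // RTX (RFC 4588) note: a retransmission carries the original packet's
+ // sequence number in the first two bytes of its payload, in network byte
+ // order, hence the big-endian read above.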
+
+ // Simulate 5% video packet loss after rampup period. Record the
+ // corresponding timestamps that were dropped.
+ if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
+ EXPECT_EQ(test::CallTest::kFakeVideoSendPayloadType,
+ rtp_packet.PayloadType());
+ EXPECT_EQ(test::CallTest::kVideoSendSsrcs[0], rtp_packet.Ssrc());
+
+ dropped_sequence_numbers_.insert(rtp_packet.SequenceNumber());
+ dropped_timestamps_.insert(rtp_packet.Timestamp());
+
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
+ test::RtcpPacketParser parser;
+
+ parser.Parse(data, length);
+ if (parser.sender_ssrc() == kFlexfecLocalSsrc) {
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ const std::vector<rtcp::ReportBlock>& report_blocks =
+ parser.receiver_report()->report_blocks();
+ if (!report_blocks.empty()) {
+ EXPECT_EQ(1U, report_blocks.size());
+ EXPECT_EQ(test::CallTest::kFlexfecSendSsrc,
+ report_blocks[0].source_ssrc());
+ MutexLock lock(&mutex_);
+ received_flexfec_rtcp_ = true;
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ // At low RTT (< kLowRttNackMs) hybrid NACK/FEC sends NACK only and no
+ // FEC, so add network delay to keep FEC protection enabled.
+ const int kNetworkDelayMs = 100;
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return config;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ EXPECT_EQ(kVideoRotation_90, video_frame.rotation());
+
+ MutexLock lock(&mutex_);
+ // Rendering frame with timestamp of packet that was dropped -> FEC
+ // protection worked.
+ auto it = dropped_timestamps_.find(video_frame.timestamp());
+ if (it != dropped_timestamps_.end()) {
+ if (!expect_flexfec_rtcp_ || received_flexfec_rtcp_) {
+ observation_complete_.Set();
+ }
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ (*receive_configs)[0].rtp.local_ssrc = kVideoLocalSsrc;
+ (*receive_configs)[0].renderer = this;
+
+ if (enable_nack_) {
+ send_config->rtp.nack.rtp_history_ms = test::CallTest::kNackRtpHistoryMs;
+ send_config->rtp.rtx.ssrcs.push_back(test::CallTest::kSendRtxSsrcs[0]);
+ send_config->rtp.rtx.payload_type = test::CallTest::kSendRtxPayloadType;
+
+ (*receive_configs)[0].rtp.nack.rtp_history_ms =
+ test::CallTest::kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.rtx_ssrc = test::CallTest::kSendRtxSsrcs[0];
+ (*receive_configs)[0]
+ .rtp
+ .rtx_associated_payload_types[test::CallTest::kSendRtxPayloadType] =
+ test::CallTest::kVideoSendPayloadType;
+ }
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(kVideoRotation_90);
+ }
+
+ void ModifyFlexfecConfigs(
+ std::vector<FlexfecReceiveStream::Config>* receive_configs) override {
+ (*receive_configs)[0].rtp.local_ssrc = kFlexfecLocalSsrc;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for dropped frames to be rendered.";
+ }
+
+ Mutex mutex_;
+ std::set<uint32_t> dropped_sequence_numbers_ RTC_GUARDED_BY(mutex_);
+ // Several packets can have the same timestamp.
+ std::multiset<uint32_t> dropped_timestamps_ RTC_GUARDED_BY(mutex_);
+ const bool enable_nack_;
+ const bool expect_flexfec_rtcp_;
+ bool received_flexfec_rtcp_ RTC_GUARDED_BY(mutex_);
+ Random random_;
+ int num_packets_sent_ RTC_GUARDED_BY(mutex_);
+};
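+
+// Unlike ULPFEC, FlexFEC travels in a separate RTP stream with its own SSRC,
+// so the receiver issues RTCP report blocks for it independently;
+// expect_flexfec_rtcp_ asserts that a receiver report covering
+// kFlexfecSendSsrc actually goes out.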
+
+TEST_F(FecEndToEndTest, RecoversWithFlexfec) {
+ FlexfecRenderObserver test(false, false);
+ RunBaseTest(&test);
+}
+
+TEST_F(FecEndToEndTest, RecoversWithFlexfecAndNack) {
+ FlexfecRenderObserver test(true, false);
+ RunBaseTest(&test);
+}
+
+TEST_F(FecEndToEndTest, RecoversWithFlexfecAndSendsCorrespondingRtcp) {
+ FlexfecRenderObserver test(false, true);
+ RunBaseTest(&test);
+}
+
+TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) {
+ class UlpfecNackObserver : public test::EndToEndTest {
+ public:
+ UlpfecNackObserver()
+ : EndToEndTest(kDefaultTimeout),
+ state_(kFirstPacket),
+ ulpfec_sequence_number_(0),
+ has_last_sequence_number_(false),
+ last_sequence_number_(0),
+ encoder_factory_([]() { return VP8Encoder::Create(); }) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ int encapsulated_payload_type = -1;
+ if (rtp_packet.PayloadType() == kRedPayloadType) {
+ encapsulated_payload_type = rtp_packet.payload()[0];
+ if (encapsulated_payload_type != kFakeVideoSendPayloadType)
+ EXPECT_EQ(kUlpfecPayloadType, encapsulated_payload_type);
+ } else {
+ EXPECT_EQ(kFakeVideoSendPayloadType, rtp_packet.PayloadType());
+ }
+
+ if (has_last_sequence_number_ &&
+ !IsNewerSequenceNumber(rtp_packet.SequenceNumber(),
+ last_sequence_number_)) {
+ // Drop retransmitted packets.
+ return DROP_PACKET;
+ }
+ last_sequence_number_ = rtp_packet.SequenceNumber();
+ has_last_sequence_number_ = true;
+
+ bool ulpfec_packet = encapsulated_payload_type == kUlpfecPayloadType;
+ switch (state_) {
+ case kFirstPacket:
+ state_ = kDropEveryOtherPacketUntilUlpfec;
+ break;
+ case kDropEveryOtherPacketUntilUlpfec:
+ if (ulpfec_packet) {
+ state_ = kDropAllMediaPacketsUntilUlpfec;
+ } else if (rtp_packet.SequenceNumber() % 2 == 0) {
+ return DROP_PACKET;
+ }
+ break;
+ case kDropAllMediaPacketsUntilUlpfec:
+ if (!ulpfec_packet)
+ return DROP_PACKET;
+ ulpfec_sequence_number_ = rtp_packet.SequenceNumber();
+ state_ = kDropOneMediaPacket;
+ break;
+ case kDropOneMediaPacket:
+ if (ulpfec_packet)
+ return DROP_PACKET;
+ state_ = kPassOneMediaPacket;
+ return DROP_PACKET;
+ case kPassOneMediaPacket:
+ if (ulpfec_packet)
+ return DROP_PACKET;
+ // Pass one media packet after the packet dropped following the last
+ // FEC packet; otherwise the receiver might never see a sequence
+ // number newer than `ulpfec_sequence_number_`.
+ state_ = kVerifyUlpfecPacketNotInNackList;
+ break;
+ case kVerifyUlpfecPacketNotInNackList:
+ // Continue to drop packets. Make sure no frame can be decoded.
+ if (ulpfec_packet || rtp_packet.SequenceNumber() % 2 == 0)
+ return DROP_PACKET;
+ break;
+ }
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ if (state_ == kVerifyUlpfecPacketNotInNackList) {
+ test::RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(packet, length);
+ const std::vector<uint16_t>& nacks = rtcp_parser.nack()->packet_ids();
+ EXPECT_THAT(nacks, Not(Contains(ulpfec_sequence_number_)))
+ << "Got nack for ULPFEC packet";
+ if (!nacks.empty() &&
+ IsNewerSequenceNumber(nacks.back(), ulpfec_sequence_number_)) {
+ observation_complete_.Set();
+ }
+ }
+ return SEND_PACKET;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ // At low RTT (< kLowRttNackMs) hybrid NACK/FEC sends NACK only and no
+ // FEC, so configure some network delay to keep FEC enabled.
+ const int kNetworkDelayMs = 50;
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return config;
+ }
+
+ // TODO(holmer): Investigate why we don't send FEC packets when the bitrate
+ // is 10 kbps.
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ const int kMinBitrateBps = 30000;
+ bitrate_config->min_bitrate_bps = kMinBitrateBps;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Configure hybrid NACK/FEC.
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ // Set codec to VP8, otherwise NACK/FEC hybrid will be disabled.
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP8";
+ send_config->rtp.payload_type = kFakeVideoSendPayloadType;
+ encoder_config->codec_type = kVideoCodecVP8;
+
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.red_payload_type = kRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type = kUlpfecPayloadType;
+
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->rtp.payload_type;
+ (*receive_configs)[0].decoders[0].video_format =
+ SdpVideoFormat(send_config->rtp.payload_name);
+ (*receive_configs)[0].decoder_factory = &decoder_factory_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for FEC packets to be received.";
+ }
+
+ enum {
+ kFirstPacket,
+ kDropEveryOtherPacketUntilUlpfec,
+ kDropAllMediaPacketsUntilUlpfec,
+ kDropOneMediaPacket,
+ kPassOneMediaPacket,
+ kVerifyUlpfecPacketNotInNackList,
+ } state_;
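+
+ // State walk-through: drop every other media packet until the first
+ // ULPFEC packet is seen; then drop all media until the next ULPFEC packet
+ // (which is let through) and record its sequence number; drop the
+ // following media packet; pass one media packet so the receiver sees a
+ // newer sequence number; finally keep dropping while checking that the
+ // NACKs sent never ask for the recorded ULPFEC sequence number.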
+
+ Mutex mutex_;
+ uint16_t ulpfec_sequence_number_ RTC_GUARDED_BY(&mutex_);
+ bool has_last_sequence_number_ RTC_GUARDED_BY(&mutex_);
+ uint16_t last_sequence_number_ RTC_GUARDED_BY(&mutex_);
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ InternalDecoderFactory decoder_factory_;
+ } test;
+
+ RunBaseTest(&test);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/frame_encryption_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/frame_encryption_tests.cc
new file mode 100644
index 0000000000..6a1b16927c
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/frame_encryption_tests.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/test/fake_frame_decryptor.h"
+#include "api/test/fake_frame_encryptor.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "test/call_test.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using FrameEncryptionEndToEndTest = test::CallTest;
+
+enum : int { // The first valid value is 1.
+ kGenericDescriptorExtensionId = 1,
+};
+
+class DecryptedFrameObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ DecryptedFrameObserver()
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ encoder_factory_([] { return VP8Encoder::Create(); }) {}
+
+ private:
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Use VP8 instead of FAKE.
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP8";
+ send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType;
+ send_config->frame_encryptor =
+ rtc::make_ref_counted<FakeFrameEncryptor>();
+ send_config->crypto_options.sframe.require_frame_encryption = true;
+ encoder_config->codec_type = kVideoCodecVP8;
+ VideoReceiveStreamInterface::Decoder decoder =
+ test::CreateMatchingDecoder(*send_config);
+ for (auto& recv_config : *receive_configs) {
+ recv_config.decoder_factory = &decoder_factory_;
+ recv_config.decoders.clear();
+ recv_config.decoders.push_back(decoder);
+ recv_config.renderer = this;
+ recv_config.frame_decryptor = rtc::make_ref_counted<FakeFrameDecryptor>();
+ recv_config.crypto_options.sframe.require_frame_encryption = true;
+ }
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ observation_complete_.Set();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for decrypted frames to be rendered.";
+ }
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ InternalDecoderFactory decoder_factory_;
+};
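+
+// With crypto_options.sframe.require_frame_encryption set on both ends, any
+// frame that is not end-to-end encrypted is discarded, so rendering a frame
+// at all shows that the FakeFrameEncryptor/FakeFrameDecryptor pair was
+// actually engaged.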
+
+// Validates that payloads cannot be sent without a frame encryptor and frame
+// decryptor attached.
+TEST_F(FrameEncryptionEndToEndTest,
+ WithGenericFrameDescriptorRequireFrameEncryptionEnforced) {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kGenericFrameDescriptorUri00,
+ kGenericDescriptorExtensionId));
+ DecryptedFrameObserver test;
+ RunBaseTest(&test);
+}
+
+TEST_F(FrameEncryptionEndToEndTest,
+ WithDependencyDescriptorRequireFrameEncryptionEnforced) {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kDependencyDescriptorUri,
+ kGenericDescriptorExtensionId));
+ DecryptedFrameObserver test;
+ RunBaseTest(&test);
+}
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/histogram_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/histogram_tests.cc
new file mode 100644
index 0000000000..03e32ffba8
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/histogram_tests.cc
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "absl/types/optional.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/call_test.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kTransportSequenceNumberExtensionId = 1,
+ kVideoContentTypeExtensionId,
+};
+} // namespace
+
+class HistogramTest : public test::CallTest {
+ public:
+ HistogramTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoContentTypeUri,
+ kVideoContentTypeExtensionId));
+ }
+
+ protected:
+ void VerifyHistogramStats(bool use_rtx, bool use_fec, bool screenshare);
+};
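+
+// Note: most of the histograms checked below are recorded when streams and
+// calls are torn down, so sample counts are asserted only after RunBaseTest()
+// returns; metrics::Reset() runs first so earlier tests cannot leak samples
+// into these counts.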
+
+void HistogramTest::VerifyHistogramStats(bool use_rtx,
+ bool use_fec,
+ bool screenshare) {
+ class FrameObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ FrameObserver(bool use_rtx, bool use_fec, bool screenshare)
+ : EndToEndTest(kLongTimeout),
+ use_rtx_(use_rtx),
+ use_fec_(use_fec),
+ screenshare_(screenshare),
+ // This test uses NACK, so to send FEC we can't use a fake encoder.
+ encoder_factory_([]() { return VP8Encoder::Create(); }),
+ num_frames_received_(0) {}
+
+ private:
+ void OnFrame(const VideoFrame& video_frame) override {
+ // The RTT is needed to estimate `ntp_time_ms` which is used by
+ // end-to-end delay stats. Therefore, start counting received frames once
+ // `ntp_time_ms` is valid.
+ if (video_frame.ntp_time_ms() > 0 &&
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
+ video_frame.ntp_time_ms()) {
+ MutexLock lock(&mutex_);
+ ++num_frames_received_;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (MinMetricRunTimePassed() && MinNumberOfFramesReceived())
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ bool MinMetricRunTimePassed() {
+ int64_t now_ms = Clock::GetRealTimeClock()->TimeInMilliseconds();
+ if (!start_runtime_ms_)
+ start_runtime_ms_ = now_ms;
+
+ int64_t elapsed_sec = (now_ms - *start_runtime_ms_) / 1000;
+ return elapsed_sec > metrics::kMinRunTimeInSeconds * 2;
+ }
+
+ bool MinNumberOfFramesReceived() const {
+ const int kMinRequiredHistogramSamples = 200;
+ MutexLock lock(&mutex_);
+ return num_frames_received_ > kMinRequiredHistogramSamples;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // NACK
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].renderer = this;
+ // FEC
+ if (use_fec_) {
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP8";
+ encoder_config->codec_type = kVideoCodecVP8;
+ (*receive_configs)[0].decoders[0].video_format = SdpVideoFormat("VP8");
+ (*receive_configs)[0].rtp.red_payload_type = kRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type = kUlpfecPayloadType;
+ }
+ // RTX
+ if (use_rtx_) {
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+ (*receive_configs)[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
+ (*receive_configs)[0]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
+ kFakeVideoSendPayloadType;
+ if (use_fec_) {
+ send_config->rtp.ulpfec.red_rtx_payload_type = kRtxRedPayloadType;
+ (*receive_configs)[0]
+ .rtp.rtx_associated_payload_types[kRtxRedPayloadType] =
+ kSendRtxPayloadType;
+ }
+ }
+ // RTT needed for RemoteNtpTimeEstimator for the receive stream.
+ (*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report = true;
+ encoder_config->content_type =
+ screenshare_ ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for min frames to be received.";
+ }
+
+ mutable Mutex mutex_;
+ const bool use_rtx_;
+ const bool use_fec_;
+ const bool screenshare_;
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ absl::optional<int64_t> start_runtime_ms_;
+ int num_frames_received_ RTC_GUARDED_BY(&mutex_);
+ } test(use_rtx, use_fec, screenshare);
+
+ metrics::Reset();
+ RunBaseTest(&test);
+
+ const std::string video_prefix =
+ screenshare ? "WebRTC.Video.Screenshare." : "WebRTC.Video.";
+ // The content type extension is disabled in the non-screenshare test,
+ // therefore no slicing on simulcast ID should be present.
+ const std::string video_suffix = screenshare ? ".S0" : "";
+
+ // Verify that each stat has been updated exactly once. LifetimeInSeconds
+ // is reported by both the sender and the receiver call, hence two samples.
+ EXPECT_METRIC_EQ(2, metrics::NumSamples("WebRTC.Call.LifetimeInSeconds"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Call.VideoBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Call.RtcpBitrateReceivedInBps"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Call.BitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Call.EstimatedSendBitrateInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Call.PacerBitrateInKbps"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "NackPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "FirPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "PliPacketsReceivedPerMinute"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "KeyFramesSentInPermille"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "SentPacketsLostInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "InputWidthInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "InputHeightInPixels"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "SentWidthInPixels"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "SentHeightInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "ReceivedWidthInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "ReceivedHeightInPixels"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "InputWidthInPixels",
+ kDefaultWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "InputHeightInPixels",
+ kDefaultHeight));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(video_prefix + "SentWidthInPixels", kDefaultWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "SentHeightInPixels",
+ kDefaultHeight));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "ReceivedWidthInPixels",
+ kDefaultWidth));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents(video_prefix + "ReceivedHeightInPixels",
+ kDefaultHeight));
+
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "InputFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "SentFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayInMs" +
+ video_suffix));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "EndToEndDelayMaxInMs" +
+ video_suffix));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "InterframeDelayInMs" +
+ video_suffix));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "InterframeDelayMaxInMs" +
+ video_suffix));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "EncodeTimeInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "PausedTimeInPercent"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.BitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.MediaBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PaddingBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.RetransmittedBitrateReceivedInKbps"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "SendSideDelayInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "SendSideDelayMaxInMs"));
+
+ int num_rtx_samples = use_rtx ? 1 : 0;
+ EXPECT_METRIC_EQ(num_rtx_samples,
+ metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ num_rtx_samples,
+ metrics::NumSamples("WebRTC.Video.RtxBitrateReceivedInKbps"));
+
+ int num_red_samples = use_fec ? 1 : 0;
+ EXPECT_METRIC_EQ(num_red_samples,
+ metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ num_red_samples,
+ metrics::NumSamples("WebRTC.Video.FecBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ num_red_samples,
+ metrics::NumSamples("WebRTC.Video.ReceivedFecPacketsInPercent"));
+}
+
+TEST_F(HistogramTest, VerifyStatsWithRtx) {
+ const bool kEnabledRtx = true;
+ const bool kEnabledRed = false;
+ const bool kScreenshare = false;
+ VerifyHistogramStats(kEnabledRtx, kEnabledRed, kScreenshare);
+}
+
+TEST_F(HistogramTest, VerifyStatsWithRed) {
+ const bool kEnabledRtx = false;
+ const bool kEnabledRed = true;
+ const bool kScreenshare = false;
+ VerifyHistogramStats(kEnabledRtx, kEnabledRed, kScreenshare);
+}
+
+TEST_F(HistogramTest, VerifyStatsWithScreenshare) {
+ const bool kEnabledRtx = false;
+ const bool kEnabledRed = false;
+ const bool kScreenshare = true;
+ VerifyHistogramStats(kEnabledRtx, kEnabledRed, kScreenshare);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/multi_codec_receive_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/multi_codec_receive_tests.cc
new file mode 100644
index 0000000000..d8ac606bfd
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/multi_codec_receive_tests.cc
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/simulated_network.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/call_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Contains;
+
+namespace webrtc {
+namespace {
+constexpr int kWidth = 1280;
+constexpr int kHeight = 720;
+constexpr int kFps = 30;
+constexpr int kFramesToObserve = 10;
+
+uint8_t PayloadNameToPayloadType(const std::string& payload_name) {
+ if (payload_name == "VP8") {
+ return test::CallTest::kPayloadTypeVP8;
+ } else if (payload_name == "VP9") {
+ return test::CallTest::kPayloadTypeVP9;
+ } else if (payload_name == "H264") {
+ return test::CallTest::kPayloadTypeH264;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ return 0;
+ }
+}
+
+int RemoveOlderOrEqual(uint32_t timestamp, std::vector<uint32_t>* timestamps) {
+ int num_removed = 0;
+ while (!timestamps->empty()) {
+ auto it = timestamps->begin();
+ if (IsNewerTimestamp(*it, timestamp))
+ break;
+
+ timestamps->erase(it);
+ ++num_removed;
+ }
+ return num_removed;
+}
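+
+// Example: with sent timestamps {10, 20, 30}, RemoveOlderOrEqual(20, &v)
+// erases 10 and 20 and returns 2, leaving {30}. IsNewerTimestamp compares
+// modulo 2^32, so the pruning also behaves correctly across RTP timestamp
+// wraparound.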
+
+class FrameObserver : public test::RtpRtcpObserver,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ FrameObserver() : test::RtpRtcpObserver(test::CallTest::kDefaultTimeout) {}
+
+ void Reset(uint8_t expected_payload_type) {
+ MutexLock lock(&mutex_);
+ num_sent_frames_ = 0;
+ num_rendered_frames_ = 0;
+ expected_payload_type_ = expected_payload_type;
+ }
+
+ private:
+ // Sends kFramesToObserve.
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ EXPECT_EQ(rtp_packet.Ssrc(), test::CallTest::kVideoSendSsrcs[0]);
+ if (rtp_packet.payload_size() == 0)
+ return SEND_PACKET; // Skip padding, may be sent after OnFrame is called.
+
+ if (expected_payload_type_ &&
+ rtp_packet.PayloadType() != expected_payload_type_.value()) {
+ return DROP_PACKET; // All frames sent.
+ }
+
+ if (!last_timestamp_ || rtp_packet.Timestamp() != *last_timestamp_) {
+ // New frame.
+ // Sent enough frames?
+ if (num_sent_frames_ >= kFramesToObserve)
+ return DROP_PACKET;
+
+ ++num_sent_frames_;
+ sent_timestamps_.push_back(rtp_packet.Timestamp());
+ }
+
+ last_timestamp_ = rtp_packet.Timestamp();
+ return SEND_PACKET;
+ }
+
+ // Verifies that all sent frames are decoded and rendered.
+ void OnFrame(const VideoFrame& rendered_frame) override {
+ MutexLock lock(&mutex_);
+ EXPECT_THAT(sent_timestamps_, Contains(rendered_frame.timestamp()));
+
+ // Remove old timestamps too, only the newest decoded frame is rendered.
+ num_rendered_frames_ +=
+ RemoveOlderOrEqual(rendered_frame.timestamp(), &sent_timestamps_);
+
+ if (num_rendered_frames_ >= kFramesToObserve) {
+ EXPECT_TRUE(sent_timestamps_.empty()) << "Not all sent frames were decoded.";
+ observation_complete_.Set();
+ }
+ }
+
+ Mutex mutex_;
+ absl::optional<uint32_t> last_timestamp_; // Only accessed from pacer thread.
+ absl::optional<uint8_t> expected_payload_type_ RTC_GUARDED_BY(mutex_);
+ int num_sent_frames_ RTC_GUARDED_BY(mutex_) = 0;
+ int num_rendered_frames_ RTC_GUARDED_BY(mutex_) = 0;
+ std::vector<uint32_t> sent_timestamps_ RTC_GUARDED_BY(mutex_);
+};
+} // namespace
+
+class MultiCodecReceiveTest : public test::CallTest {
+ public:
+ MultiCodecReceiveTest() {
+ SendTask(task_queue(), [this]() {
+ CreateCalls();
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(), &observer_);
+ CreateReceiveTransport(BuiltInNetworkBehaviorConfig(), &observer_);
+ });
+ }
+
+ virtual ~MultiCodecReceiveTest() {
+ SendTask(task_queue(), [this]() {
+ send_transport_.reset();
+ receive_transport_.reset();
+ DestroyCalls();
+ });
+ }
+
+ struct CodecConfig {
+ std::string payload_name;
+ size_t num_temporal_layers;
+ };
+
+ void ConfigureEncoder(const CodecConfig& config,
+ VideoEncoderFactory* encoder_factory);
+ void ConfigureDecoders(const std::vector<CodecConfig>& configs,
+ VideoDecoderFactory* decoder_factory);
+ void RunTestWithCodecs(const std::vector<CodecConfig>& configs);
+
+ private:
+ FrameObserver observer_;
+};
+
+void MultiCodecReceiveTest::ConfigureDecoders(
+ const std::vector<CodecConfig>& configs,
+ VideoDecoderFactory* decoder_factory) {
+ video_receive_configs_[0].decoders.clear();
+ video_receive_configs_[0].decoder_factory = decoder_factory;
+ // Placing the payload names in a std::set keeps only the unique names.
+ std::set<std::string> unique_payload_names;
+ for (const auto& config : configs)
+ if (unique_payload_names.insert(config.payload_name).second) {
+ VideoReceiveStreamInterface::Decoder decoder =
+ test::CreateMatchingDecoder(
+ PayloadNameToPayloadType(config.payload_name),
+ config.payload_name);
+
+ video_receive_configs_[0].decoders.push_back(decoder);
+ }
+}
+
+void MultiCodecReceiveTest::ConfigureEncoder(
+ const CodecConfig& config,
+ VideoEncoderFactory* encoder_factory) {
+ GetVideoSendConfig()->encoder_settings.encoder_factory = encoder_factory;
+ GetVideoSendConfig()->rtp.payload_name = config.payload_name;
+ GetVideoSendConfig()->rtp.payload_type =
+ PayloadNameToPayloadType(config.payload_name);
+ GetVideoEncoderConfig()->codec_type =
+ PayloadStringToCodecType(config.payload_name);
+ EXPECT_EQ(1u, GetVideoEncoderConfig()->simulcast_layers.size());
+ GetVideoEncoderConfig()->simulcast_layers[0].num_temporal_layers =
+ config.num_temporal_layers;
+ GetVideoEncoderConfig()->video_format.name = config.payload_name;
+}
+
+void MultiCodecReceiveTest::RunTestWithCodecs(
+ const std::vector<CodecConfig>& configs) {
+ EXPECT_FALSE(configs.empty());
+
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [](const SdpVideoFormat& format) -> std::unique_ptr<VideoEncoder> {
+ if (format.name == "VP8") {
+ return VP8Encoder::Create();
+ }
+ if (format.name == "VP9") {
+ return VP9Encoder::Create();
+ }
+ if (format.name == "H264") {
+ return H264Encoder::Create(cricket::VideoCodec("H264"));
+ }
+ RTC_DCHECK_NOTREACHED() << format.name;
+ return nullptr;
+ });
+ test::FunctionVideoDecoderFactory decoder_factory(
+ [](const SdpVideoFormat& format) -> std::unique_ptr<VideoDecoder> {
+ if (format.name == "VP8") {
+ return VP8Decoder::Create();
+ }
+ if (format.name == "VP9") {
+ return VP9Decoder::Create();
+ }
+ if (format.name == "H264") {
+ return H264Decoder::Create();
+ }
+ RTC_DCHECK_NOTREACHED() << format.name;
+ return nullptr;
+ });
+ // Create and start call.
+ SendTask(task_queue(),
+ [this, &configs, &encoder_factory, &decoder_factory]() {
+ CreateSendConfig(1, 0, 0);
+ ConfigureEncoder(configs[0], &encoder_factory);
+ CreateMatchingReceiveConfigs();
+ video_receive_configs_[0].renderer = &observer_;
+ // Disable to avoid post-decode frame dropping in
+ // VideoRenderFrames.
+ video_receive_configs_[0].enable_prerenderer_smoothing = false;
+ ConfigureDecoders(configs, &decoder_factory);
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kFps, kWidth, kHeight);
+ Start();
+ });
+ EXPECT_TRUE(observer_.Wait()) << "Timed out waiting for frames.";
+
+ for (size_t i = 1; i < configs.size(); ++i) {
+ // Recreate VideoSendStream with new config (codec, temporal layers).
+ SendTask(task_queue(), [this, i, &configs, &encoder_factory]() {
+ DestroyVideoSendStreams();
+ observer_.Reset(PayloadNameToPayloadType(configs[i].payload_name));
+
+ ConfigureEncoder(configs[i], &encoder_factory);
+ CreateVideoSendStreams();
+ GetVideoSendStream()->Start();
+ CreateFrameGeneratorCapturer(kFps, kWidth / 2, kHeight / 2);
+ ConnectVideoSourcesToStreams();
+ });
+ EXPECT_TRUE(observer_.Wait()) << "Timed out waiting for frames.";
+ }
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ });
+}
+
+TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9) {
+ RunTestWithCodecs({{"VP8", 1}, {"VP9", 1}, {"VP8", 1}});
+}
+
+TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9WithTl) {
+ RunTestWithCodecs({{"VP8", 2}, {"VP9", 2}, {"VP8", 2}});
+}
+
+#if defined(WEBRTC_USE_H264)
+TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8H264) {
+ RunTestWithCodecs({{"VP8", 1}, {"H264", 1}, {"VP8", 1}});
+}
+
+TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8H264WithTl) {
+ RunTestWithCodecs({{"VP8", 3}, {"H264", 1}, {"VP8", 3}});
+}
+
+TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9H264) {
+ RunTestWithCodecs({{"VP8", 1}, {"VP9", 1}, {"H264", 1}, {"VP9", 1}});
+}
+
+TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9H264WithTl) {
+ RunTestWithCodecs({{"VP8", 3}, {"VP9", 2}, {"H264", 1}, {"VP9", 3}});
+}
+#endif // defined(WEBRTC_USE_H264)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.cc b/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.cc
new file mode 100644
index 0000000000..82e9eb9417
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/end_to_end_tests/multi_stream_tester.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/create_frame_generator.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/call_test.h"
+#include "test/encoder_settings.h"
+
+namespace webrtc {
+
+MultiStreamTester::MultiStreamTester() {
+ // TODO(sprang): Cleanup when msvc supports explicit initializers for array.
+ codec_settings[0] = {1, 640, 480};
+ codec_settings[1] = {2, 320, 240};
+ codec_settings[2] = {3, 240, 160};
+}
+
+MultiStreamTester::~MultiStreamTester() = default;
+
+void MultiStreamTester::RunTest() {
+ webrtc::RtcEventLogNull event_log;
+ auto task_queue_factory = CreateDefaultTaskQueueFactory();
+ // Use high priority since this task queue is used by the fake network to
+ // deliver packets at the right time. These test tasks should be preferred
+ // over the code under test to make the test more stable.
+ auto task_queue = task_queue_factory->CreateTaskQueue(
+ "TaskQueue", TaskQueueFactory::Priority::HIGH);
+ Call::Config config(&event_log);
+ test::ScopedKeyValueConfig field_trials;
+ config.trials = &field_trials;
+ config.task_queue_factory = task_queue_factory.get();
+ std::unique_ptr<Call> sender_call;
+ std::unique_ptr<Call> receiver_call;
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ VideoSendStream* send_streams[kNumStreams];
+ VideoReceiveStreamInterface* receive_streams[kNumStreams];
+ test::FrameGeneratorCapturer* frame_generators[kNumStreams];
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory =
+ CreateBuiltinVideoBitrateAllocatorFactory();
+ InternalDecoderFactory decoder_factory;
+
+ SendTask(task_queue.get(), [&]() {
+ sender_call = absl::WrapUnique(Call::Create(config));
+ receiver_call = absl::WrapUnique(Call::Create(config));
+ sender_transport = CreateSendTransport(task_queue.get(), sender_call.get());
+ receiver_transport =
+ CreateReceiveTransport(task_queue.get(), receiver_call.get());
+ sender_transport->SetReceiver(receiver_call->Receiver());
+ receiver_transport->SetReceiver(sender_call->Receiver());
+
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ uint32_t ssrc = codec_settings[i].ssrc;
+ int width = codec_settings[i].width;
+ int height = codec_settings[i].height;
+
+ VideoSendStream::Config send_config(sender_transport.get());
+ send_config.rtp.ssrcs.push_back(ssrc);
+ send_config.encoder_settings.encoder_factory = &encoder_factory;
+ send_config.encoder_settings.bitrate_allocator_factory =
+ bitrate_allocator_factory.get();
+ send_config.rtp.payload_name = "VP8";
+ send_config.rtp.payload_type = kVideoPayloadType;
+ VideoEncoderConfig encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &encoder_config);
+ encoder_config.max_bitrate_bps = 100000;
+
+ UpdateSendConfig(i, &send_config, &encoder_config, &frame_generators[i]);
+
+ send_streams[i] = sender_call->CreateVideoSendStream(
+ send_config.Copy(), encoder_config.Copy());
+ send_streams[i]->Start();
+
+ VideoReceiveStreamInterface::Config receive_config(
+ receiver_transport.get());
+ receive_config.rtp.remote_ssrc = ssrc;
+ receive_config.rtp.local_ssrc = test::CallTest::kReceiverLocalVideoSsrc;
+ receive_config.decoder_factory = &decoder_factory;
+ VideoReceiveStreamInterface::Decoder decoder =
+ test::CreateMatchingDecoder(send_config);
+ receive_config.decoders.push_back(decoder);
+
+ UpdateReceiveConfig(i, &receive_config);
+
+ receive_streams[i] =
+ receiver_call->CreateVideoReceiveStream(std::move(receive_config));
+ receive_streams[i]->Start();
+
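+      // Feed each send stream from a 30 fps square-pattern frame generator
+      // running at the stream's configured resolution.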
+ auto* frame_generator = new test::FrameGeneratorCapturer(
+ Clock::GetRealTimeClock(),
+ test::CreateSquareFrameGenerator(width, height, absl::nullopt,
+ absl::nullopt),
+ 30, *task_queue_factory);
+ frame_generators[i] = frame_generator;
+ send_streams[i]->SetSource(frame_generator,
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ frame_generator->Init();
+ frame_generator->Start();
+ }
+ });
+
+ Wait();
+
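+  // Tear everything down on the same task queue, in reverse order of creation.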
+ SendTask(task_queue.get(), [&]() {
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ frame_generators[i]->Stop();
+ sender_call->DestroyVideoSendStream(send_streams[i]);
+ receiver_call->DestroyVideoReceiveStream(receive_streams[i]);
+ delete frame_generators[i];
+ }
+
+ sender_transport.reset();
+ receiver_transport.reset();
+
+ sender_call.reset();
+ receiver_call.reset();
+ });
+}
+
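+// Default hook implementations: the Update* hooks are no-ops, and the
+// Create*Transport hooks build a DirectTransport over a default-configured
+// SimulatedNetwork. Subclasses may override any of them.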
+void MultiStreamTester::UpdateSendConfig(
+ size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator) {}
+
+void MultiStreamTester::UpdateReceiveConfig(
+ size_t stream_index,
+ VideoReceiveStreamInterface::Config* receive_config) {}
+
+std::unique_ptr<test::DirectTransport> MultiStreamTester::CreateSendTransport(
+ TaskQueueBase* task_queue,
+ Call* sender_call) {
+ std::vector<RtpExtension> extensions = {};
+ return std::make_unique<test::DirectTransport>(
+ task_queue,
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(),
+ std::make_unique<SimulatedNetwork>(BuiltInNetworkBehaviorConfig())),
+ sender_call, payload_type_map_, extensions, extensions);
+}
+
+std::unique_ptr<test::DirectTransport>
+MultiStreamTester::CreateReceiveTransport(TaskQueueBase* task_queue,
+ Call* receiver_call) {
+ std::vector<RtpExtension> extensions = {};
+ return std::make_unique<test::DirectTransport>(
+ task_queue,
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(),
+ std::make_unique<SimulatedNetwork>(BuiltInNetworkBehaviorConfig())),
+ receiver_call, payload_type_map_, extensions, extensions);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.h b/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.h
new file mode 100644
index 0000000000..87200930f4
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tester.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_END_TO_END_TESTS_MULTI_STREAM_TESTER_H_
+#define VIDEO_END_TO_END_TESTS_MULTI_STREAM_TESTER_H_
+
+#include <map>
+#include <memory>
+
+#include "api/task_queue/task_queue_base.h"
+#include "call/call.h"
+#include "test/direct_transport.h"
+#include "test/frame_generator_capturer.h"
+
+namespace webrtc {
+// The test sets up a Call with multiple senders using different resolutions
+// and SSRCs. Another Call is set up to receive all three of these streams
+// with different renderers.
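+//
+// A minimal subclass only needs to implement Wait(); an illustrative sketch
+// (names are hypothetical):
+//
+//   class MyTester : public MultiStreamTester {
+//     void Wait() override { /* block until all streams are verified */ }
+//   };
+//   MyTester().RunTest();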
+class MultiStreamTester {
+ public:
+ static constexpr size_t kNumStreams = 3;
+ const uint8_t kVideoPayloadType = 124;
+ const std::map<uint8_t, MediaType> payload_type_map_ = {
+ {kVideoPayloadType, MediaType::VIDEO}};
+
+ struct CodecSettings {
+ uint32_t ssrc;
+ int width;
+ int height;
+ } codec_settings[kNumStreams];
+
+ MultiStreamTester();
+
+ virtual ~MultiStreamTester();
+
+ void RunTest();
+
+ protected:
+ virtual void Wait() = 0;
+  // Note: frame_generator is a pointer-to-pointer, since the actual instance
+ // hasn't been created at the time of this call. Only when packets/frames
+ // start flowing should this be dereferenced.
+ virtual void UpdateSendConfig(size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator);
+ virtual void UpdateReceiveConfig(
+ size_t stream_index,
+ VideoReceiveStreamInterface::Config* receive_config);
+ virtual std::unique_ptr<test::DirectTransport> CreateSendTransport(
+ TaskQueueBase* task_queue,
+ Call* sender_call);
+ virtual std::unique_ptr<test::DirectTransport> CreateReceiveTransport(
+ TaskQueueBase* task_queue,
+ Call* receiver_call);
+};
+} // namespace webrtc
+#endif // VIDEO_END_TO_END_TESTS_MULTI_STREAM_TESTER_H_
diff --git a/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tests.cc
new file mode 100644
index 0000000000..b997538d96
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/multi_stream_tests.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "call/rtp_config.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "rtc_base/event.h"
+#include "test/frame_generator_capturer.h"
+#include "test/gtest.h"
+#include "video/config/video_encoder_config.h"
+#include "video/end_to_end_tests/multi_stream_tester.h"
+
+namespace webrtc {
+// Each renderer verifies that it receives the expected resolution, and as soon
+// as every renderer has received a frame, the test finishes.
+TEST(MultiStreamEndToEndTest, SendsAndReceivesMultipleStreams) {
+ class VideoOutputObserver : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ VideoOutputObserver(const MultiStreamTester::CodecSettings& settings,
+ uint32_t ssrc,
+ test::FrameGeneratorCapturer** frame_generator)
+ : settings_(settings), ssrc_(ssrc), frame_generator_(frame_generator) {}
+
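+    // Checks the frame resolution, then stops the capturer so no further
+    // frames arrive once the event has been set.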
+ void OnFrame(const VideoFrame& video_frame) override {
+ EXPECT_EQ(settings_.width, video_frame.width());
+ EXPECT_EQ(settings_.height, video_frame.height());
+ (*frame_generator_)->Stop();
+ done_.Set();
+ }
+
+ uint32_t Ssrc() { return ssrc_; }
+
+ bool Wait() { return done_.Wait(TimeDelta::Seconds(30)); }
+
+ private:
+ const MultiStreamTester::CodecSettings& settings_;
+ const uint32_t ssrc_;
+ test::FrameGeneratorCapturer** const frame_generator_;
+ rtc::Event done_;
+ };
+
+ class Tester : public MultiStreamTester {
+ public:
+ Tester() = default;
+ ~Tester() override = default;
+
+ protected:
+ void Wait() override {
+ for (const auto& observer : observers_) {
+ EXPECT_TRUE(observer->Wait())
+ << "Time out waiting for from on ssrc " << observer->Ssrc();
+ }
+ }
+
+ void UpdateSendConfig(
+ size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator) override {
+ observers_[stream_index] = std::make_unique<VideoOutputObserver>(
+ codec_settings[stream_index], send_config->rtp.ssrcs.front(),
+ frame_generator);
+ }
+
+ void UpdateReceiveConfig(
+ size_t stream_index,
+ VideoReceiveStreamInterface::Config* receive_config) override {
+ receive_config->renderer = observers_[stream_index].get();
+ }
+
+ private:
+ std::unique_ptr<VideoOutputObserver> observers_[kNumStreams];
+ } tester;
+
+ tester.RunTest();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/network_state_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/network_state_tests.cc
new file mode 100644
index 0000000000..a39f9fe9e3
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/network_state_tests.cc
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/media_types.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/test/simulated_network.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/fake_encoder.h"
+#include "test/gtest.h"
+#include "test/video_encoder_proxy_factory.h"
+
+namespace webrtc {
+namespace {
+constexpr int kSilenceTimeoutMs = 2000;
+}
+
+class NetworkStateEndToEndTest : public test::CallTest {
+ protected:
+ class UnusedTransport : public Transport {
+ private:
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override {
+ ADD_FAILURE() << "Unexpected RTP sent.";
+ return false;
+ }
+
+ bool SendRtcp(const uint8_t* packet, size_t length) override {
+ ADD_FAILURE() << "Unexpected RTCP sent.";
+ return false;
+ }
+ };
+ class RequiredTransport : public Transport {
+ public:
+ RequiredTransport(bool rtp_required, bool rtcp_required)
+ : need_rtp_(rtp_required), need_rtcp_(rtcp_required) {}
+ ~RequiredTransport() {
+ if (need_rtp_) {
+ ADD_FAILURE() << "Expected RTP packet not sent.";
+ }
+ if (need_rtcp_) {
+ ADD_FAILURE() << "Expected RTCP packet not sent.";
+ }
+ }
+
+ private:
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override {
+ MutexLock lock(&mutex_);
+ need_rtp_ = false;
+ return true;
+ }
+
+ bool SendRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ need_rtcp_ = false;
+ return true;
+ }
+ bool need_rtp_;
+ bool need_rtcp_;
+ Mutex mutex_;
+ };
+ void VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ VideoEncoder* encoder,
+ Transport* transport);
+ void VerifyNewVideoReceiveStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ Transport* transport);
+};
+
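+// Brings up only `network_to_bring_up`, starts a newly created video send
+// stream and lets it run for kSilenceTimeoutMs; the supplied transport and
+// encoder assert whether any traffic or encoding was expected.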
+void NetworkStateEndToEndTest::VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ VideoEncoder* encoder,
+ Transport* transport) {
+ test::VideoEncoderProxyFactory encoder_factory(encoder);
+
+ SendTask(task_queue(),
+ [this, network_to_bring_up, &encoder_factory, transport]() {
+ CreateSenderCall(Call::Config(send_event_log_.get()));
+ sender_call_->SignalChannelNetworkState(network_to_bring_up,
+ kNetworkUp);
+
+ CreateSendConfig(1, 0, 0, transport);
+ GetVideoSendConfig()->encoder_settings.encoder_factory =
+ &encoder_factory;
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+
+ Start();
+ });
+
+ SleepMs(kSilenceTimeoutMs);
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+void NetworkStateEndToEndTest::VerifyNewVideoReceiveStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ Transport* transport) {
+ SendTask(task_queue(), [this, network_to_bring_up, transport]() {
+ CreateCalls();
+ receiver_call_->SignalChannelNetworkState(network_to_bring_up, kNetworkUp);
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(),
+ /*observer=*/nullptr);
+
+ CreateSendConfig(1, 0, 0);
+ CreateMatchingReceiveConfigs(transport);
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+ });
+
+ SleepMs(kSilenceTimeoutMs);
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) {
+ // TODO(pbos): Remove accepted downtime packets etc. when signaling network
+ // down blocks until no more packets will be sent.
+
+  // The pacer sends from its packet queue and then sends any required padding
+  // before checking paused_ again. kNumAcceptedDowntimeRtp should be enough
+  // for one round of pacing; increase it otherwise.
+ static const int kNumAcceptedDowntimeRtp = 5;
+ // A single RTCP may be in the pipeline.
+ static const int kNumAcceptedDowntimeRtcp = 1;
+ class NetworkStateTest : public test::EndToEndTest, public test::FakeEncoder {
+ public:
+ explicit NetworkStateTest(TaskQueueBase* task_queue)
+ : EndToEndTest(kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ e2e_test_task_queue_(task_queue),
+ task_queue_(CreateDefaultTaskQueueFactory()->CreateTaskQueue(
+ "NetworkStateTest",
+ TaskQueueFactory::Priority::NORMAL)),
+ sender_call_(nullptr),
+ receiver_call_(nullptr),
+ encoder_factory_(this),
+ sender_state_(kNetworkUp),
+ sender_rtp_(0),
+ sender_padding_(0),
+ sender_rtcp_(0),
+ receiver_rtcp_(0),
+ down_frames_(0) {}
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&test_mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ if (rtp_packet.payload_size() == 0)
+ ++sender_padding_;
+ ++sender_rtp_;
+ packet_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&test_mutex_);
+ ++sender_rtcp_;
+ packet_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtp(const uint8_t* packet, size_t length) override {
+ ADD_FAILURE() << "Unexpected receiver RTP, should not be sending.";
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&test_mutex_);
+ ++receiver_rtcp_;
+ packet_event_.Set();
+ return SEND_PACKET;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ receiver_call_ = receiver_call;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ }
+
+ void SignalChannelNetworkState(Call* call,
+ MediaType media_type,
+ NetworkState network_state) {
+ SendTask(e2e_test_task_queue_, [call, media_type, network_state] {
+ call->SignalChannelNetworkState(media_type, network_state);
+ });
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(encoded_frames_.Wait(kDefaultTimeout))
+ << "No frames received by the encoder.";
+
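+      // Run the scenario on this test's own task queue so the blocking
+      // WaitForPacketsOrSilence() calls don't stall the main test task queue.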
+ SendTask(task_queue_.get(), [this]() {
+ // Wait for packets from both sender/receiver.
+ WaitForPacketsOrSilence(false, false);
+
+        // Sender-side network down for audio; this should have no effect on
+        // video.
+ SignalChannelNetworkState(sender_call_, MediaType::AUDIO, kNetworkDown);
+
+ WaitForPacketsOrSilence(false, false);
+
+        // Receiver-side network down for audio; no change expected.
+ SignalChannelNetworkState(receiver_call_, MediaType::AUDIO,
+ kNetworkDown);
+ WaitForPacketsOrSilence(false, false);
+
+ // Sender-side network down.
+ SignalChannelNetworkState(sender_call_, MediaType::VIDEO, kNetworkDown);
+ {
+ MutexLock lock(&test_mutex_);
+ // After network goes down we shouldn't be encoding more frames.
+ sender_state_ = kNetworkDown;
+ }
+ // Wait for receiver-packets and no sender packets.
+ WaitForPacketsOrSilence(true, false);
+
+ // Receiver-side network down.
+ SignalChannelNetworkState(receiver_call_, MediaType::VIDEO,
+ kNetworkDown);
+ WaitForPacketsOrSilence(true, true);
+
+        // Network up for audio on both sides; video is still not expected to
+        // start.
+ SignalChannelNetworkState(sender_call_, MediaType::AUDIO, kNetworkUp);
+ SignalChannelNetworkState(receiver_call_, MediaType::AUDIO, kNetworkUp);
+ WaitForPacketsOrSilence(true, true);
+
+ // Network back up again for both.
+ {
+ MutexLock lock(&test_mutex_);
+ // It's OK to encode frames again, as we're about to bring up the
+ // network.
+ sender_state_ = kNetworkUp;
+ }
+ SignalChannelNetworkState(sender_call_, MediaType::VIDEO, kNetworkUp);
+ SignalChannelNetworkState(receiver_call_, MediaType::VIDEO, kNetworkUp);
+ WaitForPacketsOrSilence(false, false);
+
+ // TODO(skvlad): add tests to verify that the audio streams are stopped
+ // when the network goes down for audio once the workaround in
+ // paced_sender.cc is removed.
+ });
+ }
+
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ {
+ MutexLock lock(&test_mutex_);
+ if (sender_state_ == kNetworkDown) {
+ ++down_frames_;
+ EXPECT_LE(down_frames_, 1)
+ << "Encoding more than one frame while network is down.";
+ if (down_frames_ > 1)
+ encoded_frames_.Set();
+ } else {
+ encoded_frames_.Set();
+ }
+ }
+ return test::FakeEncoder::Encode(input_image, frame_types);
+ }
+
+ private:
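+    // Watches the packet counters for at least kSilenceTimeoutMs. Directions
+    // flagged as down must stay (nearly) silent; directions that are up only
+    // need to show traffic before the function returns.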
+ void WaitForPacketsOrSilence(bool sender_down, bool receiver_down) {
+ int64_t initial_time_ms = clock_->TimeInMilliseconds();
+ int initial_sender_rtp;
+ int initial_sender_rtcp;
+ int initial_receiver_rtcp;
+ {
+ MutexLock lock(&test_mutex_);
+ initial_sender_rtp = sender_rtp_;
+ initial_sender_rtcp = sender_rtcp_;
+ initial_receiver_rtcp = receiver_rtcp_;
+ }
+ bool sender_done = false;
+ bool receiver_done = false;
+ while (!sender_done || !receiver_done) {
+ packet_event_.Wait(TimeDelta::Millis(kSilenceTimeoutMs));
+ int64_t time_now_ms = clock_->TimeInMilliseconds();
+ MutexLock lock(&test_mutex_);
+ if (sender_down) {
+ ASSERT_LE(sender_rtp_ - initial_sender_rtp - sender_padding_,
+ kNumAcceptedDowntimeRtp)
+ << "RTP sent during sender-side downtime.";
+ ASSERT_LE(sender_rtcp_ - initial_sender_rtcp,
+ kNumAcceptedDowntimeRtcp)
+ << "RTCP sent during sender-side downtime.";
+ if (time_now_ms - initial_time_ms >=
+ static_cast<int64_t>(kSilenceTimeoutMs)) {
+ sender_done = true;
+ }
+ } else {
+ if (sender_rtp_ > initial_sender_rtp + kNumAcceptedDowntimeRtp)
+ sender_done = true;
+ }
+ if (receiver_down) {
+ ASSERT_LE(receiver_rtcp_ - initial_receiver_rtcp,
+ kNumAcceptedDowntimeRtcp)
+ << "RTCP sent during receiver-side downtime.";
+ if (time_now_ms - initial_time_ms >=
+ static_cast<int64_t>(kSilenceTimeoutMs)) {
+ receiver_done = true;
+ }
+ } else {
+ if (receiver_rtcp_ > initial_receiver_rtcp + kNumAcceptedDowntimeRtcp)
+ receiver_done = true;
+ }
+ }
+ }
+
+ TaskQueueBase* const e2e_test_task_queue_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_;
+ Mutex test_mutex_;
+ rtc::Event encoded_frames_;
+ rtc::Event packet_event_;
+ Call* sender_call_;
+ Call* receiver_call_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ NetworkState sender_state_ RTC_GUARDED_BY(test_mutex_);
+ int sender_rtp_ RTC_GUARDED_BY(test_mutex_);
+ int sender_padding_ RTC_GUARDED_BY(test_mutex_);
+ int sender_rtcp_ RTC_GUARDED_BY(test_mutex_);
+ int receiver_rtcp_ RTC_GUARDED_BY(test_mutex_);
+ int down_frames_ RTC_GUARDED_BY(test_mutex_);
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(NetworkStateEndToEndTest, NewVideoSendStreamsRespectVideoNetworkDown) {
+ class UnusedEncoder : public test::FakeEncoder {
+ public:
+ UnusedEncoder() : FakeEncoder(Clock::GetRealTimeClock()) {}
+
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ EXPECT_GT(config->startBitrate, 0u);
+ return 0;
+ }
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ ADD_FAILURE() << "Unexpected frame encode.";
+ return test::FakeEncoder::Encode(input_image, frame_types);
+ }
+ };
+
+ UnusedEncoder unused_encoder;
+ UnusedTransport unused_transport;
+ VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType::AUDIO, &unused_encoder, &unused_transport);
+}
+
+TEST_F(NetworkStateEndToEndTest, NewVideoSendStreamsIgnoreAudioNetworkDown) {
+ class RequiredEncoder : public test::FakeEncoder {
+ public:
+ RequiredEncoder()
+ : FakeEncoder(Clock::GetRealTimeClock()), encoded_frame_(false) {}
+ ~RequiredEncoder() {
+ if (!encoded_frame_) {
+ ADD_FAILURE() << "Didn't encode an expected frame";
+ }
+ }
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ encoded_frame_ = true;
+ return test::FakeEncoder::Encode(input_image, frame_types);
+ }
+
+ private:
+ bool encoded_frame_;
+ };
+
+ RequiredTransport required_transport(true /*rtp*/, false /*rtcp*/);
+ RequiredEncoder required_encoder;
+ VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType::VIDEO, &required_encoder, &required_transport);
+}
+
+TEST_F(NetworkStateEndToEndTest,
+ NewVideoReceiveStreamsRespectVideoNetworkDown) {
+ UnusedTransport transport;
+ VerifyNewVideoReceiveStreamsRespectNetworkState(MediaType::AUDIO, &transport);
+}
+
+TEST_F(NetworkStateEndToEndTest, NewVideoReceiveStreamsIgnoreAudioNetworkDown) {
+ RequiredTransport transport(false /*rtp*/, true /*rtcp*/);
+ VerifyNewVideoReceiveStreamsRespectNetworkState(MediaType::VIDEO, &transport);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/resolution_bitrate_limits_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/resolution_bitrate_limits_tests.cc
new file mode 100644
index 0000000000..8455832885
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/resolution_bitrate_limits_tests.cc
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "media/engine/webrtc_video_engine.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "test/call_test.h"
+#include "test/fake_encoder.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/video_encoder_proxy_factory.h"
+#include "video/config/encoder_stream_factory.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+void SetEncoderSpecific(VideoEncoderConfig* encoder_config,
+ VideoCodecType type,
+ size_t num_spatial_layers) {
+ if (type == kVideoCodecVP9) {
+ VideoCodecVP9 vp9 = VideoEncoder::GetDefaultVp9Settings();
+ vp9.numberOfSpatialLayers = num_spatial_layers;
+ encoder_config->encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9);
+ }
+}
+
+struct BitrateLimits {
+ DataRate min = DataRate::Zero();
+ DataRate max = DataRate::Zero();
+};
+
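+// Returns the configured min/max bitrate of the spatial (VP9) or simulcast
+// layer whose resolution matches `pixels`; fails the test if no layer matches.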
+BitrateLimits GetLayerBitrateLimits(int pixels, const VideoCodec& codec) {
+ if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
+ for (size_t i = 0; i < codec.VP9().numberOfSpatialLayers; ++i) {
+ if (codec.spatialLayers[i].width * codec.spatialLayers[i].height ==
+ pixels) {
+ return {DataRate::KilobitsPerSec(codec.spatialLayers[i].minBitrate),
+ DataRate::KilobitsPerSec(codec.spatialLayers[i].maxBitrate)};
+ }
+ }
+ } else {
+ for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ if (codec.simulcastStream[i].width * codec.simulcastStream[i].height ==
+ pixels) {
+ return {DataRate::KilobitsPerSec(codec.simulcastStream[i].minBitrate),
+ DataRate::KilobitsPerSec(codec.simulcastStream[i].maxBitrate)};
+ }
+ }
+ }
+ ADD_FAILURE();
+ return BitrateLimits();
+}
+
+} // namespace
+
+class ResolutionBitrateLimitsWithScalabilityModeTest : public test::CallTest {};
+
+class ResolutionBitrateLimitsTest
+ : public test::CallTest,
+ public ::testing::WithParamInterface<std::string> {
+ public:
+ ResolutionBitrateLimitsTest() : payload_name_(GetParam()) {}
+
+ const std::string payload_name_;
+};
+
+INSTANTIATE_TEST_SUITE_P(PayloadName,
+ ResolutionBitrateLimitsTest,
+ ::testing::Values("VP8", "VP9"),
+ [](const ::testing::TestParamInfo<std::string>& info) {
+ return info.param;
+ });
+
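+// Acts as its own encoder via test::FakeEncoder so that InitEncode() can
+// inspect the VideoCodec configuration handed to the encoder and compare the
+// per-layer bitrate limits against the supplied expectations.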
+class InitEncodeTest : public test::EndToEndTest,
+ public test::FrameGeneratorCapturer::SinkWantsObserver,
+ public test::FakeEncoder {
+ public:
+ struct Bitrate {
+ const absl::optional<DataRate> min;
+ const absl::optional<DataRate> max;
+ };
+ struct TestConfig {
+ const bool active;
+ const Bitrate bitrate;
+ const absl::optional<ScalabilityMode> scalability_mode;
+ };
+ struct Expectation {
+ const uint32_t pixels = 0;
+ const Bitrate eq_bitrate;
+ const Bitrate ne_bitrate;
+ };
+
+ InitEncodeTest(const std::string& payload_name,
+ const std::vector<TestConfig>& configs,
+ const std::vector<Expectation>& expectations)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ encoder_factory_(this),
+ payload_name_(payload_name),
+ configs_(configs),
+ expectations_(expectations) {}
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetSinkWantsObserver(this);
+ // Set initial resolution.
+ frame_generator_capturer->ChangeResolution(1280, 720);
+ }
+
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {}
+
+ size_t GetNumVideoStreams() const override {
+ return (payload_name_ == "VP9") ? 1 : configs_.size();
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType;
+ const VideoCodecType codec_type = PayloadStringToCodecType(payload_name_);
+ encoder_config->codec_type = codec_type;
+ encoder_config->video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ payload_name_, /*max qp*/ 0, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ encoder_config->max_bitrate_bps = -1;
+ if (configs_.size() == 1 && configs_[0].bitrate.max)
+ encoder_config->max_bitrate_bps = configs_[0].bitrate.max->bps();
+ if (payload_name_ == "VP9") {
+      // Simulcast layers indicate which spatial layers are active.
+ encoder_config->simulcast_layers.resize(configs_.size());
+ }
+ double scale_factor = 1.0;
+ for (int i = configs_.size() - 1; i >= 0; --i) {
+ VideoStream& stream = encoder_config->simulcast_layers[i];
+ stream.active = configs_[i].active;
+ stream.scalability_mode = configs_[i].scalability_mode;
+ if (configs_[i].bitrate.min)
+ stream.min_bitrate_bps = configs_[i].bitrate.min->bps();
+ if (configs_[i].bitrate.max)
+ stream.max_bitrate_bps = configs_[i].bitrate.max->bps();
+ stream.scale_resolution_down_by = scale_factor;
+ scale_factor *= (payload_name_ == "VP9") ? 1.0 : 2.0;
+ }
+ SetEncoderSpecific(encoder_config, codec_type, configs_.size());
+ }
+
+ int32_t InitEncode(const VideoCodec* codec,
+ const Settings& settings) override {
+ for (const auto& expected : expectations_) {
+ BitrateLimits limits = GetLayerBitrateLimits(expected.pixels, *codec);
+ if (expected.eq_bitrate.min)
+ EXPECT_EQ(*expected.eq_bitrate.min, limits.min);
+ if (expected.eq_bitrate.max)
+ EXPECT_EQ(*expected.eq_bitrate.max, limits.max);
+ EXPECT_NE(expected.ne_bitrate.min, limits.min);
+ EXPECT_NE(expected.ne_bitrate.max, limits.max);
+ }
+ observation_complete_.Set();
+ return 0;
+ }
+
+ VideoEncoder::EncoderInfo GetEncoderInfo() const override {
+ EncoderInfo info = FakeEncoder::GetEncoderInfo();
+ if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
+ info.resolution_bitrate_limits =
+ encoder_info_override_.resolution_bitrate_limits();
+ }
+ return info;
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(Wait()) << "Timed out while waiting for InitEncode() call.";
+ }
+
+ private:
+ test::VideoEncoderProxyFactory encoder_factory_;
+ const std::string payload_name_;
+ const std::vector<TestConfig> configs_;
+ const std::vector<Expectation> expectations_;
+ const LibvpxVp8EncoderInfoSettings encoder_info_override_;
+};
+
+TEST_P(ResolutionBitrateLimitsTest, LimitsApplied) {
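+  // Override the encoder's recommended bitrate limits for 720p
+  // (1280 * 720 = 921600 pixels) via field trial.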
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:921600,"
+ "min_start_bitrate_bps:0,"
+ "min_bitrate_bps:32000,"
+ "max_bitrate_bps:3333000/");
+
+ InitEncodeTest test(payload_name_, {{.active = true}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_F(ResolutionBitrateLimitsWithScalabilityModeTest,
+ OneStreamLimitsAppliedForOneSpatialLayer) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:921600,"
+ "min_start_bitrate_bps:0,"
+ "min_bitrate_bps:32000,"
+ "max_bitrate_bps:3333000/");
+
+ InitEncodeTest test(
+ "VP9", {{.active = true, .scalability_mode = ScalabilityMode::kL1T1}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_F(ResolutionBitrateLimitsWithScalabilityModeTest,
+ OneStreamLimitsNotAppliedForMultipleSpatialLayers) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:21000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(
+ "VP9", {{.active = true, .scalability_mode = ScalabilityMode::kL2T1}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .ne_bitrate = {DataRate::KilobitsPerSec(31),
+ DataRate::KilobitsPerSec(2222)}},
+ {.pixels = 1280 * 720,
+ .ne_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, EncodingsApplied) {
+ InitEncodeTest test(payload_name_,
+ {{.active = true,
+ .bitrate = {DataRate::KilobitsPerSec(22),
+ DataRate::KilobitsPerSec(3555)}}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(22),
+ DataRate::KilobitsPerSec(3555)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, IntersectionApplied) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:921600,"
+ "min_start_bitrate_bps:0,"
+ "min_bitrate_bps:32000,"
+ "max_bitrate_bps:3333000/");
+
+ InitEncodeTest test(payload_name_,
+ {{.active = true,
+ .bitrate = {DataRate::KilobitsPerSec(22),
+ DataRate::KilobitsPerSec(1555)}}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(1555)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, LimitsAppliedMiddleActive) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:21000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(payload_name_,
+ {{.active = false}, {.active = true}, {.active = false}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .eq_bitrate = {DataRate::KilobitsPerSec(21),
+ DataRate::KilobitsPerSec(2222)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, IntersectionAppliedMiddleActive) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(payload_name_,
+ {{.active = false},
+ {.active = true,
+ .bitrate = {DataRate::KilobitsPerSec(30),
+ DataRate::KilobitsPerSec(1555)}},
+ {.active = false}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .eq_bitrate = {DataRate::KilobitsPerSec(31),
+ DataRate::KilobitsPerSec(1555)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, DefaultLimitsAppliedMiddleActive) {
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kDefaultSinglecastLimits360p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ PayloadStringToCodecType(payload_name_), 640 * 360);
+
+ InitEncodeTest test(
+ payload_name_, {{.active = false}, {.active = true}, {.active = false}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .eq_bitrate = {
+ DataRate::BitsPerSec(kDefaultSinglecastLimits360p->min_bitrate_bps),
+ DataRate::BitsPerSec(
+ kDefaultSinglecastLimits360p->max_bitrate_bps)}}});
+ RunBaseTest(&test);
+}
+
+TEST_F(ResolutionBitrateLimitsWithScalabilityModeTest,
+ DefaultLimitsAppliedForOneSpatialLayer) {
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kDefaultSinglecastLimits720p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ PayloadStringToCodecType("VP9"), 1280 * 720);
+
+ InitEncodeTest test(
+ "VP9",
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T3},
+ {.active = false}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {
+ DataRate::BitsPerSec(kDefaultSinglecastLimits720p->min_bitrate_bps),
+ DataRate::BitsPerSec(
+ kDefaultSinglecastLimits720p->max_bitrate_bps)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, LimitsAppliedHighestActive) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(payload_name_,
+ {{.active = false}, {.active = false}, {.active = true}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, IntersectionAppliedHighestActive) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(payload_name_,
+ {{.active = false},
+ {.active = false},
+ {.active = true,
+ .bitrate = {DataRate::KilobitsPerSec(30),
+ DataRate::KilobitsPerSec(1555)}}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(1555)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, LimitsNotAppliedLowestActive) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(payload_name_, {{.active = true}, {.active = false}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .ne_bitrate = {DataRate::KilobitsPerSec(31),
+ DataRate::KilobitsPerSec(2222)}},
+ {.pixels = 1280 * 720,
+ .ne_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_F(ResolutionBitrateLimitsWithScalabilityModeTest,
+ LimitsAppliedForVp9OneSpatialLayer) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(
+ "VP9",
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T1},
+ {.active = false}},
+ // Expectations:
+ {{.pixels = 1280 * 720,
+ .eq_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_F(ResolutionBitrateLimitsWithScalabilityModeTest,
+ LimitsNotAppliedForVp9MultipleSpatialLayers) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(
+ "VP9",
+ {{.active = true, .scalability_mode = ScalabilityMode::kL2T1},
+ {.active = false}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .ne_bitrate = {DataRate::KilobitsPerSec(31),
+ DataRate::KilobitsPerSec(2222)}},
+ {.pixels = 1280 * 720,
+ .ne_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+TEST_P(ResolutionBitrateLimitsTest, LimitsNotAppliedSimulcast) {
+ webrtc::test::ScopedFieldTrials field_trials(
+ "WebRTC-GetEncoderInfoOverride/"
+ "frame_size_pixels:230400|921600,"
+ "min_start_bitrate_bps:0|0,"
+ "min_bitrate_bps:31000|32000,"
+ "max_bitrate_bps:2222000|3333000/");
+
+ InitEncodeTest test(payload_name_, {{.active = true}, {.active = true}},
+ // Expectations:
+ {{.pixels = 640 * 360,
+ .ne_bitrate = {DataRate::KilobitsPerSec(31),
+ DataRate::KilobitsPerSec(2222)}},
+ {.pixels = 1280 * 720,
+ .ne_bitrate = {DataRate::KilobitsPerSec(32),
+ DataRate::KilobitsPerSec(3333)}}});
+ RunBaseTest(&test);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/retransmission_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/retransmission_tests.cc
new file mode 100644
index 0000000000..45a9dae1e8
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/retransmission_tests.cc
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/algorithm/container.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/units/time_delta.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kVideoRotationExtensionId = 1,
+};
+} // namespace
+
+class RetransmissionEndToEndTest : public test::CallTest {
+ public:
+ RetransmissionEndToEndTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoRotationUri,
+ kVideoRotationExtensionId));
+ }
+
+ protected:
+ void DecodesRetransmittedFrame(bool enable_rtx, bool enable_red);
+ void ReceivesPliAndRecovers(int rtp_history_ms);
+};
+
+TEST_F(RetransmissionEndToEndTest, ReceivesAndRetransmitsNack) {
+ static const int kNumberOfNacksToObserve = 2;
+ static const int kLossBurstSize = 2;
+ static const int kPacketsBetweenLossBursts = 9;
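+  // Drops short bursts of RTP packets and verifies that NACKs arrive and that
+  // every dropped packet is eventually retransmitted.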
+ class NackObserver : public test::EndToEndTest {
+ public:
+ NackObserver()
+ : EndToEndTest(kLongTimeout),
+ sent_rtp_packets_(0),
+ packets_left_to_drop_(0),
+ nacks_left_(kNumberOfNacksToObserve) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ // Never drop retransmitted packets.
+ if (dropped_packets_.find(rtp_packet.SequenceNumber()) !=
+ dropped_packets_.end()) {
+ retransmitted_packets_.insert(rtp_packet.SequenceNumber());
+ return SEND_PACKET;
+ }
+
+ if (nacks_left_ <= 0 &&
+ retransmitted_packets_.size() == dropped_packets_.size()) {
+ observation_complete_.Set();
+ }
+
+ ++sent_rtp_packets_;
+
+ // Enough NACKs received, stop dropping packets.
+ if (nacks_left_ <= 0)
+ return SEND_PACKET;
+
+ // Check if it's time for a new loss burst.
+ if (sent_rtp_packets_ % kPacketsBetweenLossBursts == 0)
+ packets_left_to_drop_ = kLossBurstSize;
+
+ // Never drop padding packets as those won't be retransmitted.
+ if (packets_left_to_drop_ > 0 && rtp_packet.padding_size() == 0) {
+ --packets_left_to_drop_;
+ dropped_packets_.insert(rtp_packet.SequenceNumber());
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ nacks_left_ -= parser.nack()->num_packets();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for packets to be NACKed, retransmitted and "
+ "rendered.";
+ }
+
+ Mutex mutex_;
+ std::set<uint16_t> dropped_packets_;
+ std::set<uint16_t> retransmitted_packets_;
+ uint64_t sent_rtp_packets_;
+ int packets_left_to_drop_;
+ int nacks_left_ RTC_GUARDED_BY(&mutex_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(RetransmissionEndToEndTest, ReceivesNackAndRetransmitsAudio) {
+ class NackObserver : public test::EndToEndTest {
+ public:
+ NackObserver()
+ : EndToEndTest(kLongTimeout),
+ local_ssrc_(0),
+ remote_ssrc_(0),
+ receive_transport_(nullptr) {}
+
+ private:
+ size_t GetNumVideoStreams() const override { return 0; }
+ size_t GetNumAudioStreams() const override { return 1; }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
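+      // Drop the first packet seen and remember its sequence number; then
+      // NACK it until the retransmission shows up.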
+ if (!sequence_number_to_retransmit_) {
+ sequence_number_to_retransmit_ = rtp_packet.SequenceNumber();
+ return DROP_PACKET;
+
+          // Don't ask for a retransmission straight away; it may be deduped
+          // in the pacer.
+ } else if (rtp_packet.SequenceNumber() ==
+ *sequence_number_to_retransmit_) {
+ observation_complete_.Set();
+ } else {
+ // Send a NACK as often as necessary until retransmission is received.
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(local_ssrc_);
+ nack.SetMediaSsrc(remote_ssrc_);
+ uint16_t nack_list[] = {*sequence_number_to_retransmit_};
+ nack.SetPacketIds(nack_list, 1);
+ rtc::Buffer buffer = nack.Build();
+
+ EXPECT_TRUE(receive_transport_->SendRtcp(buffer.data(), buffer.size()));
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyAudioConfigs(AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStreamInterface::Config>*
+ receive_configs) override {
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ local_ssrc_ = (*receive_configs)[0].rtp.local_ssrc;
+ remote_ssrc_ = (*receive_configs)[0].rtp.remote_ssrc;
+ receive_transport_ = (*receive_configs)[0].rtcp_send_transport;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for packets to be NACKed, retransmitted and "
+ "rendered.";
+ }
+
+ uint32_t local_ssrc_;
+ uint32_t remote_ssrc_;
+ Transport* receive_transport_;
+ absl::optional<uint16_t> sequence_number_to_retransmit_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(RetransmissionEndToEndTest,
+ StopSendingKeyframeRequestsForInactiveStream) {
+ class KeyframeRequestObserver : public test::EndToEndTest {
+ public:
+ explicit KeyframeRequestObserver(TaskQueueBase* task_queue)
+ : clock_(Clock::GetRealTimeClock()), task_queue_(task_queue) {}
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ RTC_DCHECK_EQ(1, receive_streams.size());
+ send_stream_ = send_stream;
+ receive_stream_ = receive_streams[0];
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ if (parser.pli()->num_packets() > 0)
+ task_queue_->PostTask([this] { Run(); });
+ return SEND_PACKET;
+ }
+
+ bool PollStats() {
+ if (receive_stream_->GetStats().frames_decoded > 0) {
+ frame_decoded_ = true;
+ } else if (clock_->TimeInMilliseconds() - start_time_ < 5000) {
+ task_queue_->PostDelayedTask([this] { Run(); }, TimeDelta::Millis(100));
+ return false;
+ }
+ return true;
+ }
+
+ void PerformTest() override {
+ start_time_ = clock_->TimeInMilliseconds();
+ task_queue_->PostTask([this] { Run(); });
+ test_done_.Wait(rtc::Event::kForever);
+ }
+
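+    // Polls decode stats until a frame has been decoded (or ~5 seconds pass),
+    // then stops the send stream. When a PLI later arrives (see
+    // OnReceiveRtcp), a final Run() verifies that exactly one PLI was counted
+    // for the now-inactive stream.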
+ void Run() {
+ if (!frame_decoded_) {
+ if (PollStats()) {
+ send_stream_->Stop();
+ if (!frame_decoded_) {
+ test_done_.Set();
+ } else {
+ // Now we wait for the PLI packet. Once we receive it, a task
+ // will be posted (see OnReceiveRtcp) and we'll check the stats
+ // once more before signaling that we're done.
+ }
+ }
+ } else {
+ EXPECT_EQ(
+ 1U,
+ receive_stream_->GetStats().rtcp_packet_type_counts.pli_packets);
+ test_done_.Set();
+ }
+ }
+
+ private:
+ Clock* const clock_;
+ VideoSendStream* send_stream_;
+ VideoReceiveStreamInterface* receive_stream_;
+ TaskQueueBase* const task_queue_;
+ rtc::Event test_done_;
+ bool frame_decoded_ = false;
+ int64_t start_time_ = 0;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) {
+ static const int kPacketsToDrop = 1;
+
+ class PliObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ explicit PliObserver(int rtp_history_ms)
+ : EndToEndTest(kLongTimeout),
+ rtp_history_ms_(rtp_history_ms),
+ nack_enabled_(rtp_history_ms > 0),
+ highest_dropped_timestamp_(0),
+ frames_to_drop_(0),
+ received_pli_(false) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ // Drop all retransmitted packets to force a PLI.
+ if (rtp_packet.Timestamp() <= highest_dropped_timestamp_)
+ return DROP_PACKET;
+
+ if (frames_to_drop_ > 0) {
+ highest_dropped_timestamp_ = rtp_packet.Timestamp();
+ --frames_to_drop_;
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ if (!nack_enabled_)
+ EXPECT_EQ(0, parser.nack()->num_packets());
+ if (parser.pli()->num_packets() > 0)
+ received_pli_ = true;
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ MutexLock lock(&mutex_);
+ if (received_pli_ &&
+ video_frame.timestamp() > highest_dropped_timestamp_) {
+ observation_complete_.Set();
+ }
+ if (!received_pli_)
+ frames_to_drop_ = kPacketsToDrop;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = rtp_history_ms_;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = rtp_history_ms_;
+ (*receive_configs)[0].renderer = this;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for PLI to be "
+ "received and a frame to be "
+ "rendered afterwards.";
+ }
+
+ Mutex mutex_;
+ int rtp_history_ms_;
+ bool nack_enabled_;
+ uint32_t highest_dropped_timestamp_ RTC_GUARDED_BY(&mutex_);
+ int frames_to_drop_ RTC_GUARDED_BY(&mutex_);
+ bool received_pli_ RTC_GUARDED_BY(&mutex_);
+ } test(rtp_history_ms);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(RetransmissionEndToEndTest, ReceivesPliAndRecoversWithNack) {
+ ReceivesPliAndRecovers(1000);
+}
+
+TEST_F(RetransmissionEndToEndTest, ReceivesPliAndRecoversWithoutNack) {
+ ReceivesPliAndRecovers(0);
+}
+
+// This test drops the last RTP packet (marker bit set) of the frame given by
+// kDroppedFrameNumber, makes sure it's retransmitted and that the frame
+// renders. Retransmission SSRCs and payload types are also checked.
+void RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx,
+ bool enable_red) {
+ static const int kDroppedFrameNumber = 10;
+ class RetransmissionObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ RetransmissionObserver(bool enable_rtx, bool enable_red)
+ : EndToEndTest(kDefaultTimeout),
+ payload_type_(GetPayloadType(false, enable_red)),
+ retransmission_ssrc_(enable_rtx ? kSendRtxSsrcs[0]
+ : kVideoSendSsrcs[0]),
+ retransmission_payload_type_(GetPayloadType(enable_rtx, enable_red)),
+ encoder_factory_([]() { return VP8Encoder::Create(); }),
+ marker_bits_observed_(0),
+ retransmitted_timestamp_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ // Ignore padding-only packets over RTX.
+ if (rtp_packet.PayloadType() != payload_type_) {
+ EXPECT_EQ(retransmission_ssrc_, rtp_packet.Ssrc());
+ if (rtp_packet.payload_size() == 0)
+ return SEND_PACKET;
+ }
+
+ if (rtp_packet.Timestamp() == retransmitted_timestamp_) {
+ EXPECT_EQ(retransmission_ssrc_, rtp_packet.Ssrc());
+ EXPECT_EQ(retransmission_payload_type_, rtp_packet.PayloadType());
+ return SEND_PACKET;
+ }
+
+      // Found the final packet of the frame we want to drop; drop it and
+      // expect a retransmission.
+ if (rtp_packet.PayloadType() == payload_type_ && rtp_packet.Marker() &&
+ ++marker_bits_observed_ == kDroppedFrameNumber) {
+ // This should be the only dropped packet.
+ EXPECT_EQ(0u, retransmitted_timestamp_);
+ retransmitted_timestamp_ = rtp_packet.Timestamp();
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& frame) override {
+ EXPECT_EQ(kVideoRotation_90, frame.rotation());
+ {
+ MutexLock lock(&mutex_);
+ if (frame.timestamp() == retransmitted_timestamp_)
+ observation_complete_.Set();
+ }
+ orig_renderer_->OnFrame(frame);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+
+ // Insert ourselves into the rendering pipeline.
+ RTC_DCHECK(!orig_renderer_);
+ orig_renderer_ = (*receive_configs)[0].renderer;
+ RTC_DCHECK(orig_renderer_);
+ // To avoid post-decode frame dropping, disable the prerender buffer.
+ (*receive_configs)[0].enable_prerenderer_smoothing = false;
+ (*receive_configs)[0].renderer = this;
+
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+
+ if (payload_type_ == kRedPayloadType) {
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ if (retransmission_ssrc_ == kSendRtxSsrcs[0])
+ send_config->rtp.ulpfec.red_rtx_payload_type = kRtxRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type =
+ send_config->rtp.ulpfec.ulpfec_payload_type;
+ (*receive_configs)[0].rtp.red_payload_type =
+ send_config->rtp.ulpfec.red_payload_type;
+ }
+
+ if (retransmission_ssrc_ == kSendRtxSsrcs[0]) {
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+ (*receive_configs)[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
+ (*receive_configs)[0]
+ .rtp.rtx_associated_payload_types[(payload_type_ == kRedPayloadType)
+ ? kRtxRedPayloadType
+ : kSendRtxPayloadType] =
+ payload_type_;
+ }
+ // Configure encoding and decoding with VP8, since generic packetization
+ // doesn't support FEC with NACK.
+ RTC_DCHECK_EQ(1, (*receive_configs)[0].decoders.size());
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP8";
+ encoder_config->codec_type = kVideoCodecVP8;
+ (*receive_configs)[0].decoders[0].video_format = SdpVideoFormat("VP8");
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(kVideoRotation_90);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for retransmission to render.";
+ }
+
+ int GetPayloadType(bool use_rtx, bool use_fec) {
+ if (use_fec) {
+ if (use_rtx)
+ return kRtxRedPayloadType;
+ return kRedPayloadType;
+ }
+ if (use_rtx)
+ return kSendRtxPayloadType;
+ return kFakeVideoSendPayloadType;
+ }
+
+ Mutex mutex_;
+ rtc::VideoSinkInterface<VideoFrame>* orig_renderer_ = nullptr;
+ const int payload_type_;
+ const uint32_t retransmission_ssrc_;
+ const int retransmission_payload_type_;
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ const std::string payload_name_;
+ int marker_bits_observed_;
+ uint32_t retransmitted_timestamp_ RTC_GUARDED_BY(&mutex_);
+ } test(enable_rtx, enable_red);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(RetransmissionEndToEndTest, DecodesRetransmittedFrame) {
+ DecodesRetransmittedFrame(false, false);
+}
+
+TEST_F(RetransmissionEndToEndTest, DecodesRetransmittedFrameOverRtx) {
+ DecodesRetransmittedFrame(true, false);
+}
+
+TEST_F(RetransmissionEndToEndTest, DecodesRetransmittedFrameByRed) {
+ DecodesRetransmittedFrame(false, true);
+}
+
+TEST_F(RetransmissionEndToEndTest, DecodesRetransmittedFrameByRedOverRtx) {
+ DecodesRetransmittedFrame(true, true);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/rtp_rtcp_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/rtp_rtcp_tests.cc
new file mode 100644
index 0000000000..32d7cd50ef
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/rtp_rtcp_tests.cc
@@ -0,0 +1,551 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/simulated_network.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/call_test.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kTransportSequenceNumberExtensionId = 1,
+};
+} // namespace
+
+class RtpRtcpEndToEndTest : public test::CallTest {
+ protected:
+ void RespectsRtcpMode(RtcpMode rtcp_mode);
+ void TestRtpStatePreservation(bool use_rtx, bool provoke_rtcpsr_before_rtp);
+};
+
+void RtpRtcpEndToEndTest::RespectsRtcpMode(RtcpMode rtcp_mode) {
+ static const int kNumCompoundRtcpPacketsToObserve = 10;
+ class RtcpModeObserver : public test::EndToEndTest {
+ public:
+ explicit RtcpModeObserver(RtcpMode rtcp_mode)
+ : EndToEndTest(kDefaultTimeout),
+ rtcp_mode_(rtcp_mode),
+ sent_rtp_(0),
+ sent_rtcp_(0) {}
+
+ private:
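+    // Drop every third RTP packet so the receiver has losses to report in
+    // its RTCP feedback.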
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ if (++sent_rtp_ % 3 == 0)
+ return DROP_PACKET;
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ ++sent_rtcp_;
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ EXPECT_EQ(0, parser.sender_report()->num_packets());
+
+ switch (rtcp_mode_) {
+ case RtcpMode::kCompound:
+ // TODO(holmer): We shouldn't send transport feedback alone if
+ // compound RTCP is negotiated.
+ if (parser.receiver_report()->num_packets() == 0 &&
+ parser.transport_feedback()->num_packets() == 0) {
+ ADD_FAILURE() << "Received RTCP packet without receiver report for "
+ "RtcpMode::kCompound.";
+ observation_complete_.Set();
+ }
+
+ if (sent_rtcp_ >= kNumCompoundRtcpPacketsToObserve)
+ observation_complete_.Set();
+
+ break;
+ case RtcpMode::kReducedSize:
+ if (parser.receiver_report()->num_packets() == 0)
+ observation_complete_.Set();
+ break;
+ case RtcpMode::kOff:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.rtcp_mode = rtcp_mode_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << (rtcp_mode_ == RtcpMode::kCompound
+ ? "Timed out before observing enough compound packets."
+ : "Timed out before receiving a non-compound RTCP packet.");
+ }
+
+ RtcpMode rtcp_mode_;
+ Mutex mutex_;
+ // Must be protected since RTCP can be sent by both the process thread
+ // and the pacer thread.
+ int sent_rtp_ RTC_GUARDED_BY(&mutex_);
+ int sent_rtcp_ RTC_GUARDED_BY(&mutex_);
+ } test(rtcp_mode);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(RtpRtcpEndToEndTest, UsesRtcpCompoundMode) {
+ RespectsRtcpMode(RtcpMode::kCompound);
+}
+
+TEST_F(RtpRtcpEndToEndTest, UsesRtcpReducedSizeMode) {
+ RespectsRtcpMode(RtcpMode::kReducedSize);
+}
+
+void RtpRtcpEndToEndTest::TestRtpStatePreservation(
+ bool use_rtx,
+ bool provoke_rtcpsr_before_rtp) {
+  // This test uses VideoStream settings other than the default settings
+  // implemented in DefaultVideoStreamFactory. Therefore this test implements
+  // its own VideoEncoderConfig::VideoStreamFactoryInterface, which is created
+  // in ModifyVideoConfigs.
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(frame_width, frame_height, encoder_config);
+
+ if (encoder_config.number_of_streams > 1) {
+ // Lower bitrates so that all streams send initially.
+ RTC_DCHECK_EQ(3, encoder_config.number_of_streams);
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ streams[i].min_bitrate_bps = 10000;
+ streams[i].target_bitrate_bps = 15000;
+ streams[i].max_bitrate_bps = 20000;
+ }
+ } else {
+        // Use the same total bitrates when sending a single stream to avoid
+        // lowering the bitrate estimate and requiring a subsequent rampup.
+ streams[0].min_bitrate_bps = 3 * 10000;
+ streams[0].target_bitrate_bps = 3 * 15000;
+ streams[0].max_bitrate_bps = 3 * 20000;
+ }
+ return streams;
+ }
+ };
+
+ class RtpSequenceObserver : public test::RtpRtcpObserver {
+ public:
+ explicit RtpSequenceObserver(bool use_rtx)
+ : test::RtpRtcpObserver(kDefaultTimeout),
+ ssrcs_to_observe_(kNumSimulcastStreams) {
+ for (size_t i = 0; i < kNumSimulcastStreams; ++i) {
+ ssrc_is_rtx_[kVideoSendSsrcs[i]] = false;
+ if (use_rtx)
+ ssrc_is_rtx_[kSendRtxSsrcs[i]] = true;
+ }
+ }
+
+ void ResetExpectedSsrcs(size_t num_expected_ssrcs) {
+ MutexLock lock(&mutex_);
+ ssrc_observed_.clear();
+ ssrcs_to_observe_ = num_expected_ssrcs;
+ }
+
+ private:
+ void ValidateTimestampGap(uint32_t ssrc,
+ uint32_t timestamp,
+ bool only_padding)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
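+      // The RTP video clock runs at 90 kHz, i.e. 90 ticks per millisecond.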
+ static const int32_t kMaxTimestampGap = kDefaultTimeout.ms() * 90;
+ auto timestamp_it = last_observed_timestamp_.find(ssrc);
+ if (timestamp_it == last_observed_timestamp_.end()) {
+ EXPECT_FALSE(only_padding);
+ last_observed_timestamp_[ssrc] = timestamp;
+ } else {
+ // Verify timestamps are reasonably close.
+ uint32_t latest_observed = timestamp_it->second;
+        // No explicit wraparound handling is needed: the unsigned
+        // subtraction wraps, and storing the result in a signed 32-bit
+        // variable yields the correct (possibly negative) gap.
+ int32_t timestamp_gap = timestamp - latest_observed;
+ EXPECT_LE(std::abs(timestamp_gap), kMaxTimestampGap)
+ << "Gap in timestamps (" << latest_observed << " -> " << timestamp
+ << ") too large for SSRC: " << ssrc << ".";
+ timestamp_it->second = timestamp;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ const uint32_t ssrc = rtp_packet.Ssrc();
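+      // Unwrap() extends the 16-bit RTP sequence number to a monotonically
+      // increasing 64-bit value, so the gap arithmetic below is immune to
+      // wraparound.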
+ const int64_t sequence_number =
+ seq_numbers_unwrapper_.Unwrap(rtp_packet.SequenceNumber());
+ const uint32_t timestamp = rtp_packet.Timestamp();
+ const bool only_padding = rtp_packet.payload_size() == 0;
+
+ EXPECT_TRUE(ssrc_is_rtx_.find(ssrc) != ssrc_is_rtx_.end())
+ << "Received SSRC that wasn't configured: " << ssrc;
+
+ static const int64_t kMaxSequenceNumberGap = 100;
+ std::list<int64_t>* seq_numbers = &last_observed_seq_numbers_[ssrc];
+ if (seq_numbers->empty()) {
+ seq_numbers->push_back(sequence_number);
+ } else {
+ // We shouldn't get replays of previous sequence numbers.
+ for (int64_t observed : *seq_numbers) {
+ EXPECT_NE(observed, sequence_number)
+ << "Received sequence number " << sequence_number << " for SSRC "
+            << ssrc << " a second time.";
+ }
+ // Verify sequence numbers are reasonably close.
+ int64_t latest_observed = seq_numbers->back();
+ int64_t sequence_number_gap = sequence_number - latest_observed;
+ EXPECT_LE(std::abs(sequence_number_gap), kMaxSequenceNumberGap)
+ << "Gap in sequence numbers (" << latest_observed << " -> "
+ << sequence_number << ") too large for SSRC: " << ssrc << ".";
+ seq_numbers->push_back(sequence_number);
+ if (seq_numbers->size() >= kMaxSequenceNumberGap) {
+ seq_numbers->pop_front();
+ }
+ }
+
+ if (!ssrc_is_rtx_[ssrc]) {
+ MutexLock lock(&mutex_);
+ ValidateTimestampGap(ssrc, timestamp, only_padding);
+
+ // Wait for media packets on all ssrcs.
+ if (!ssrc_observed_[ssrc] && !only_padding) {
+ ssrc_observed_[ssrc] = true;
+ if (--ssrcs_to_observe_ == 0)
+ observation_complete_.Set();
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(packet, length);
+ if (rtcp_parser.sender_report()->num_packets() > 0) {
+ uint32_t ssrc = rtcp_parser.sender_report()->sender_ssrc();
+ uint32_t rtcp_timestamp = rtcp_parser.sender_report()->rtp_timestamp();
+
+ MutexLock lock(&mutex_);
+ ValidateTimestampGap(ssrc, rtcp_timestamp, false);
+ }
+ return SEND_PACKET;
+ }
+
+ RtpSequenceNumberUnwrapper seq_numbers_unwrapper_;
+ std::map<uint32_t, std::list<int64_t>> last_observed_seq_numbers_;
+ std::map<uint32_t, uint32_t> last_observed_timestamp_;
+ std::map<uint32_t, bool> ssrc_is_rtx_;
+
+ Mutex mutex_;
+ size_t ssrcs_to_observe_ RTC_GUARDED_BY(mutex_);
+ std::map<uint32_t, bool> ssrc_observed_ RTC_GUARDED_BY(mutex_);
+ } observer(use_rtx);
+
+ VideoEncoderConfig one_stream;
+
+ SendTask(task_queue(), [this, &observer, &one_stream, use_rtx]() {
+ CreateCalls();
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(), &observer);
+ CreateReceiveTransport(BuiltInNetworkBehaviorConfig(), &observer);
+ CreateSendConfig(kNumSimulcastStreams, 0, 0);
+
+ if (use_rtx) {
+ for (size_t i = 0; i < kNumSimulcastStreams; ++i) {
+ GetVideoSendConfig()->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+ }
+ GetVideoSendConfig()->rtp.rtx.payload_type = kSendRtxPayloadType;
+ }
+
+ GetVideoEncoderConfig()->video_stream_factory =
+ rtc::make_ref_counted<VideoStreamFactory>();
+ // Use the same total bitrates when sending a single stream to avoid
+ // lowering the bitrate estimate and requiring a subsequent rampup.
+ one_stream = GetVideoEncoderConfig()->Copy();
+ one_stream.number_of_streams = 1;
+ CreateMatchingReceiveConfigs();
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(30, 1280, 720);
+
+ Start();
+ });
+
+ EXPECT_TRUE(observer.Wait())
+ << "Timed out waiting for all SSRCs to send packets.";
+
+  // Reset the stream more than once to make sure the RTP state is restored
+  // every time, not just the first (as could happen if, for instance,
+  // std::map::insert were used to store it).
+ for (size_t i = 0; i < 3; ++i) {
+ SendTask(task_queue(), [&]() {
+ DestroyVideoSendStreams();
+
+ // Re-create VideoSendStream with only one stream.
+ CreateVideoSendStream(one_stream);
+ GetVideoSendStream()->Start();
+ if (provoke_rtcpsr_before_rtp) {
+        // A Rapid Resync Request forces the sender to respond with an RTCP
+        // Sender Report. Using this request speeds up the test, since we
+        // otherwise would have to wait up to a second for the next periodic
+        // Sender Report.
+ rtcp::RapidResyncRequest force_send_sr_back_request;
+ rtc::Buffer packet = force_send_sr_back_request.Build();
+ static_cast<webrtc::Transport*>(receive_transport_.get())
+ ->SendRtcp(packet.data(), packet.size());
+ }
+ CreateFrameGeneratorCapturer(30, 1280, 720);
+ });
+
+ observer.ResetExpectedSsrcs(1);
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for single RTP packet.";
+
+ // Reconfigure back to use all streams.
+ SendTask(task_queue(), [this]() {
+ GetVideoSendStream()->ReconfigureVideoEncoder(
+ GetVideoEncoderConfig()->Copy());
+ });
+ observer.ResetExpectedSsrcs(kNumSimulcastStreams);
+ EXPECT_TRUE(observer.Wait())
+ << "Timed out waiting for all SSRCs to send packets.";
+
+ // Reconfigure down to one stream.
+ SendTask(task_queue(), [this, &one_stream]() {
+ GetVideoSendStream()->ReconfigureVideoEncoder(one_stream.Copy());
+ });
+ observer.ResetExpectedSsrcs(1);
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for single RTP packet.";
+
+ // Reconfigure back to use all streams.
+ SendTask(task_queue(), [this]() {
+ GetVideoSendStream()->ReconfigureVideoEncoder(
+ GetVideoEncoderConfig()->Copy());
+ });
+ observer.ResetExpectedSsrcs(kNumSimulcastStreams);
+ EXPECT_TRUE(observer.Wait())
+ << "Timed out waiting for all SSRCs to send packets.";
+ }
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(RtpRtcpEndToEndTest, RestartingSendStreamPreservesRtpState) {
+ TestRtpStatePreservation(false, false);
+}
+
+TEST_F(RtpRtcpEndToEndTest, RestartingSendStreamPreservesRtpStatesWithRtx) {
+ TestRtpStatePreservation(true, false);
+}
+
+TEST_F(RtpRtcpEndToEndTest,
+ RestartingSendStreamKeepsRtpAndRtcpTimestampsSynced) {
+ TestRtpStatePreservation(true, true);
+}
+
+// See https://bugs.chromium.org/p/webrtc/issues/detail?id=9648.
+TEST_F(RtpRtcpEndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) {
+ class RtpSequenceObserver : public test::RtpRtcpObserver {
+ public:
+ RtpSequenceObserver()
+ : test::RtpRtcpObserver(kDefaultTimeout),
+ num_flexfec_packets_sent_(0) {}
+
+ void ResetPacketCount() {
+ MutexLock lock(&mutex_);
+ num_flexfec_packets_sent_ = 0;
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ const uint16_t sequence_number = rtp_packet.SequenceNumber();
+ const uint32_t timestamp = rtp_packet.Timestamp();
+ const uint32_t ssrc = rtp_packet.Ssrc();
+
+ if (ssrc == kVideoSendSsrcs[0] || ssrc == kSendRtxSsrcs[0]) {
+ return SEND_PACKET;
+ }
+ EXPECT_EQ(kFlexfecSendSsrc, ssrc) << "Unknown SSRC sent.";
+
+ ++num_flexfec_packets_sent_;
+
+ // If this is the first packet, we have nothing to compare to.
+ if (!last_observed_sequence_number_) {
+ last_observed_sequence_number_.emplace(sequence_number);
+ last_observed_timestamp_.emplace(timestamp);
+
+ return SEND_PACKET;
+ }
+
+ // Verify continuity and monotonicity of RTP sequence numbers.
+ EXPECT_EQ(static_cast<uint16_t>(*last_observed_sequence_number_ + 1),
+ sequence_number);
+ last_observed_sequence_number_.emplace(sequence_number);
+
+ // Timestamps should be non-decreasing...
+ const bool timestamp_is_same_or_newer =
+ timestamp == *last_observed_timestamp_ ||
+ IsNewerTimestamp(timestamp, *last_observed_timestamp_);
+ EXPECT_TRUE(timestamp_is_same_or_newer);
+ // ...but reasonably close in time.
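+      // (kVideoPayloadTypeFrequency is 90 kHz, so 10 seconds corresponds to
+      // 900000 RTP ticks.)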
+ const int k10SecondsInRtpTimestampBase = 10 * kVideoPayloadTypeFrequency;
+ EXPECT_TRUE(IsNewerTimestamp(
+ *last_observed_timestamp_ + k10SecondsInRtpTimestampBase, timestamp));
+ last_observed_timestamp_.emplace(timestamp);
+
+ // Pass test when enough packets have been let through.
+ if (num_flexfec_packets_sent_ >= 10) {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ absl::optional<uint16_t> last_observed_sequence_number_
+ RTC_GUARDED_BY(mutex_);
+ absl::optional<uint32_t> last_observed_timestamp_ RTC_GUARDED_BY(mutex_);
+ size_t num_flexfec_packets_sent_ RTC_GUARDED_BY(mutex_);
+ Mutex mutex_;
+ } observer;
+
+ static constexpr int kFrameMaxWidth = 320;
+ static constexpr int kFrameMaxHeight = 180;
+ static constexpr int kFrameRate = 15;
+
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+
+ SendTask(task_queue(), [&]() {
+ CreateCalls();
+
+ BuiltInNetworkBehaviorConfig lossy_delayed_link;
+ lossy_delayed_link.loss_percent = 2;
+ lossy_delayed_link.queue_delay_ms = 50;
+
+ CreateSendTransport(lossy_delayed_link, &observer);
+ CreateReceiveTransport(BuiltInNetworkBehaviorConfig(), &observer);
+
+    // To reduce flakiness, we use a real VP8 encoder together with NACK
+    // and RTX.
+ const int kNumVideoStreams = 1;
+ const int kNumFlexfecStreams = 1;
+ CreateSendConfig(kNumVideoStreams, 0, kNumFlexfecStreams);
+
+ GetVideoSendConfig()->encoder_settings.encoder_factory = &encoder_factory;
+ GetVideoSendConfig()->rtp.payload_name = "VP8";
+ GetVideoSendConfig()->rtp.payload_type = kVideoSendPayloadType;
+ GetVideoSendConfig()->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ GetVideoSendConfig()->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ GetVideoSendConfig()->rtp.rtx.payload_type = kSendRtxPayloadType;
+ GetVideoEncoderConfig()->codec_type = kVideoCodecVP8;
+
+ CreateMatchingReceiveConfigs();
+ video_receive_configs_[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
+ video_receive_configs_[0]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
+ kVideoSendPayloadType;
+
+ // The matching FlexFEC receive config is not created by
+ // CreateMatchingReceiveConfigs since this is not a test::BaseTest.
+ // Set up the receive config manually instead.
+ FlexfecReceiveStream::Config flexfec_receive_config(
+ receive_transport_.get());
+ flexfec_receive_config.payload_type =
+ GetVideoSendConfig()->rtp.flexfec.payload_type;
+ flexfec_receive_config.rtp.remote_ssrc =
+ GetVideoSendConfig()->rtp.flexfec.ssrc;
+ flexfec_receive_config.protected_media_ssrcs =
+ GetVideoSendConfig()->rtp.flexfec.protected_media_ssrcs;
+ flexfec_receive_config.rtp.local_ssrc = kReceiverLocalVideoSsrc;
+ flexfec_receive_config.rtp.extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId);
+ flexfec_receive_configs_.push_back(flexfec_receive_config);
+
+ CreateFlexfecStreams();
+ CreateVideoStreams();
+
+ // RTCP might be disabled if the network is "down".
+ sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+
+ Start();
+ });
+
+ // Initial test.
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ SendTask(task_queue(), [this, &observer]() {
+ // Ensure monotonicity when the VideoSendStream is restarted.
+ Stop();
+ observer.ResetPacketCount();
+ Start();
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ SendTask(task_queue(), [this, &observer]() {
+ // Ensure monotonicity when the VideoSendStream is recreated.
+ DestroyVideoSendStreams();
+ observer.ResetPacketCount();
+ CreateVideoSendStreams();
+ GetVideoSendStream()->Start();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ // Cleanup.
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/ssrc_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/ssrc_tests.cc
new file mode 100644
index 0000000000..edacde115a
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/ssrc_tests.cc
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/simulated_network.h"
+#include "call/fake_network_pipe.h"
+#include "call/packet_receiver.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/call_test.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+class SsrcEndToEndTest : public test::CallTest {
+ public:
+ SsrcEndToEndTest() {
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 1));
+ }
+
+ protected:
+ void TestSendsSetSsrcs(size_t num_ssrcs, bool send_single_ssrc_first);
+};
+
+TEST_F(SsrcEndToEndTest, ReceiverUsesLocalSsrc) {
+ class SyncRtcpObserver : public test::EndToEndTest {
+ public:
+ SyncRtcpObserver() : EndToEndTest(kDefaultTimeout) {}
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ EXPECT_EQ(kReceiverLocalVideoSsrc, parser.sender_ssrc());
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for a receiver RTCP packet to be sent.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(SsrcEndToEndTest, UnknownRtpPacketTriggersUndemuxablePacketHandler) {
+ class PacketInputObserver : public PacketReceiver {
+ public:
+ explicit PacketInputObserver(PacketReceiver* receiver)
+ : receiver_(receiver) {}
+
+ bool Wait() {
+ return undemuxable_packet_handler_triggered_.Wait(kDefaultTimeout);
+ }
+
+ private:
+ void DeliverRtpPacket(
+ MediaType media_type,
+ RtpPacketReceived packet,
+ OnUndemuxablePacketHandler undemuxable_packet_handler) override {
+ PacketReceiver::OnUndemuxablePacketHandler handler =
+ [this](const RtpPacketReceived& packet) {
+ undemuxable_packet_handler_triggered_.Set();
+              // No need to re-attempt delivering the packet.
+ return false;
+ };
+ receiver_->DeliverRtpPacket(media_type, std::move(packet),
+ std::move(handler));
+ }
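+    // RTCP is irrelevant to this test; discard it.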
+ void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override {}
+
+ PacketReceiver* receiver_;
+ rtc::Event undemuxable_packet_handler_triggered_;
+ };
+
+ std::unique_ptr<test::DirectTransport> send_transport;
+ std::unique_ptr<test::DirectTransport> receive_transport;
+ std::unique_ptr<PacketInputObserver> input_observer;
+
+ SendTask(
+ task_queue(),
+ [this, &send_transport, &receive_transport, &input_observer]() {
+ CreateCalls();
+
+ send_transport = std::make_unique<test::DirectTransport>(
+ task_queue(),
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(), std::make_unique<SimulatedNetwork>(
+ BuiltInNetworkBehaviorConfig())),
+ sender_call_.get(), payload_type_map_, GetRegisteredExtensions(),
+ GetRegisteredExtensions());
+ receive_transport = std::make_unique<test::DirectTransport>(
+ task_queue(),
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(), std::make_unique<SimulatedNetwork>(
+ BuiltInNetworkBehaviorConfig())),
+ receiver_call_.get(), payload_type_map_, GetRegisteredExtensions(),
+ GetRegisteredExtensions());
+ input_observer =
+ std::make_unique<PacketInputObserver>(receiver_call_->Receiver());
+ send_transport->SetReceiver(input_observer.get());
+ receive_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, send_transport.get());
+ CreateMatchingReceiveConfigs(receive_transport.get());
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+
+ receiver_call_->DestroyVideoReceiveStream(video_receive_streams_[0]);
+ video_receive_streams_.clear();
+ });
+
+ // Wait() waits for a received packet.
+ EXPECT_TRUE(input_observer->Wait());
+
+ SendTask(task_queue(), [this, &send_transport, &receive_transport]() {
+ Stop();
+ DestroyStreams();
+ send_transport.reset();
+ receive_transport.reset();
+ DestroyCalls();
+ });
+}
+
+void SsrcEndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs,
+ bool send_single_ssrc_first) {
+ class SendsSetSsrcs : public test::EndToEndTest {
+ public:
+ SendsSetSsrcs(const uint32_t* ssrcs,
+ size_t num_ssrcs,
+ bool send_single_ssrc_first,
+ TaskQueueBase* task_queue)
+ : EndToEndTest(kDefaultTimeout),
+ num_ssrcs_(num_ssrcs),
+ send_single_ssrc_first_(send_single_ssrc_first),
+ ssrcs_to_observe_(num_ssrcs),
+ expect_single_ssrc_(send_single_ssrc_first),
+ send_stream_(nullptr),
+ task_queue_(task_queue) {
+ for (size_t i = 0; i < num_ssrcs; ++i)
+ valid_ssrcs_[ssrcs[i]] = true;
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_TRUE(valid_ssrcs_[rtp_packet.Ssrc()])
+ << "Received unknown SSRC: " << rtp_packet.Ssrc();
+
+ if (!valid_ssrcs_[rtp_packet.Ssrc()])
+ observation_complete_.Set();
+
+ if (!is_observed_[rtp_packet.Ssrc()]) {
+ is_observed_[rtp_packet.Ssrc()] = true;
+ --ssrcs_to_observe_;
+ if (expect_single_ssrc_) {
+ expect_single_ssrc_ = false;
+ observation_complete_.Set();
+ }
+ }
+
+ if (ssrcs_to_observe_ == 0)
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override { return num_ssrcs_; }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ encoder_config->max_bitrate_bps = 50000;
+ for (auto& layer : encoder_config->simulcast_layers) {
+ layer.min_bitrate_bps = 10000;
+ layer.target_bitrate_bps = 15000;
+ layer.max_bitrate_bps = 20000;
+ }
+ video_encoder_config_all_streams_ = encoder_config->Copy();
+ if (send_single_ssrc_first_)
+ encoder_config->number_of_streams = 1;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for "
+ << (send_single_ssrc_first_ ? "first SSRC."
+ : "SSRCs.");
+
+ if (send_single_ssrc_first_) {
+ // Set full simulcast and continue with the rest of the SSRCs.
+ SendTask(task_queue_, [&]() {
+ send_stream_->ReconfigureVideoEncoder(
+ std::move(video_encoder_config_all_streams_));
+ });
+ EXPECT_TRUE(Wait()) << "Timed out while waiting on additional SSRCs.";
+ }
+ }
+
+ private:
+ std::map<uint32_t, bool> valid_ssrcs_;
+ std::map<uint32_t, bool> is_observed_;
+
+ const size_t num_ssrcs_;
+ const bool send_single_ssrc_first_;
+
+ size_t ssrcs_to_observe_;
+ bool expect_single_ssrc_;
+
+ VideoSendStream* send_stream_;
+ VideoEncoderConfig video_encoder_config_all_streams_;
+ TaskQueueBase* task_queue_;
+ } test(kVideoSendSsrcs, num_ssrcs, send_single_ssrc_first, task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(SsrcEndToEndTest, SendsSetSsrc) {
+ TestSendsSetSsrcs(1, false);
+}
+
+TEST_F(SsrcEndToEndTest, SendsSetSimulcastSsrcs) {
+ TestSendsSetSsrcs(kNumSimulcastStreams, false);
+}
+
+TEST_F(SsrcEndToEndTest, CanSwitchToUseAllSsrcs) {
+ TestSendsSetSsrcs(kNumSimulcastStreams, true);
+}
+
+TEST_F(SsrcEndToEndTest, DISABLED_RedundantPayloadsTransmittedOnAllSsrcs) {
+ class ObserveRedundantPayloads : public test::EndToEndTest {
+ public:
+ ObserveRedundantPayloads()
+ : EndToEndTest(kDefaultTimeout),
+ ssrcs_to_observe_(kNumSimulcastStreams) {
+ for (size_t i = 0; i < kNumSimulcastStreams; ++i) {
+ registered_rtx_ssrc_[kSendRtxSsrcs[i]] = true;
+ }
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ if (!registered_rtx_ssrc_[rtp_packet.Ssrc()])
+ return SEND_PACKET;
+
+ const bool packet_is_redundant_payload = rtp_packet.payload_size() > 0;
+
+ if (!packet_is_redundant_payload)
+ return SEND_PACKET;
+
+ if (!observed_redundant_retransmission_[rtp_packet.Ssrc()]) {
+ observed_redundant_retransmission_[rtp_packet.Ssrc()] = true;
+ if (--ssrcs_to_observe_ == 0)
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override { return kNumSimulcastStreams; }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ encoder_config->max_bitrate_bps = 50000;
+ for (auto& layer : encoder_config->simulcast_layers) {
+ layer.min_bitrate_bps = 10000;
+ layer.target_bitrate_bps = 15000;
+ layer.max_bitrate_bps = 20000;
+ }
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+
+ for (size_t i = 0; i < kNumSimulcastStreams; ++i)
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+
+      // Set the transmit bitrate significantly higher than the combined max
+      // bitrate of all video streams, forcing padding and thereby redundant
+      // payloads on all RTX SSRCs.
+ encoder_config->min_transmit_bitrate_bps = 100000;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for redundant payloads on all SSRCs.";
+ }
+
+ private:
+ size_t ssrcs_to_observe_;
+ std::map<uint32_t, bool> observed_redundant_retransmission_;
+ std::map<uint32_t, bool> registered_rtx_ssrc_;
+ } test;
+
+ RunBaseTest(&test);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/stats_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/stats_tests.cc
new file mode 100644
index 0000000000..62acca3b5f
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/stats_tests.cc
@@ -0,0 +1,733 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/fake_encoder.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kVideoContentTypeExtensionId = 1,
+};
+} // namespace
+
+class StatsEndToEndTest : public test::CallTest {
+ public:
+ StatsEndToEndTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoContentTypeUri,
+ kVideoContentTypeExtensionId));
+ }
+};
+
+TEST_F(StatsEndToEndTest, GetStats) {
+ static const int kStartBitrateBps = 3000000;
+ static const int kExpectedRenderDelayMs = 20;
+
+ class StatsObserver : public test::EndToEndTest {
+ public:
+ StatsObserver()
+ : EndToEndTest(kLongTimeout), encoder_factory_([]() {
+ return std::make_unique<test::DelayedEncoder>(
+ Clock::GetRealTimeClock(), 10);
+ }) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ // Drop every 25th packet => 4% loss.
+ static const int kPacketLossFrac = 25;
+ RtpPacket header;
+ if (header.Parse(packet, length) &&
+ expected_send_ssrcs_.find(header.Ssrc()) !=
+ expected_send_ssrcs_.end() &&
+ header.SequenceNumber() % kPacketLossFrac == 0) {
+ return DROP_PACKET;
+ }
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtp(const uint8_t* packet, size_t length) override {
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ bool CheckReceiveStats() {
+ for (size_t i = 0; i < receive_streams_.size(); ++i) {
+ VideoReceiveStreamInterface::Stats stats =
+ receive_streams_[i]->GetStats();
+ EXPECT_EQ(expected_receive_ssrcs_[i], stats.ssrc);
+
+ // Make sure all fields have been populated.
+ // TODO(pbos): Use CompoundKey if/when we ever know that all stats are
+ // always filled for all receivers.
+ receive_stats_filled_["IncomingRate"] |=
+ stats.network_frame_rate != 0 || stats.total_bitrate_bps != 0;
+
+ send_stats_filled_["DecoderImplementationName"] |=
+ stats.decoder_implementation_name ==
+ test::FakeDecoder::kImplementationName;
+ receive_stats_filled_["PowerEfficientDecoder"] =
+ stats.power_efficient_decoder.has_value();
+ receive_stats_filled_["RenderDelayAsHighAsExpected"] |=
+ stats.render_delay_ms >= kExpectedRenderDelayMs;
+
+ receive_stats_filled_["FrameCallback"] |= stats.decode_frame_rate != 0;
+
+ receive_stats_filled_["FrameRendered"] |= stats.render_frame_rate != 0;
+
+ receive_stats_filled_["StatisticsUpdated"] |=
+ stats.rtp_stats.packets_lost != 0 || stats.rtp_stats.jitter != 0;
+
+ receive_stats_filled_["DataCountersUpdated"] |=
+ stats.rtp_stats.packet_counter.payload_bytes != 0 ||
+ stats.rtp_stats.packet_counter.header_bytes != 0 ||
+ stats.rtp_stats.packet_counter.packets != 0 ||
+ stats.rtp_stats.packet_counter.padding_bytes != 0;
+
+ receive_stats_filled_["CodecStats"] |= stats.target_delay_ms != 0;
+
+ receive_stats_filled_["FrameCounts"] |=
+ stats.frame_counts.key_frames != 0 ||
+ stats.frame_counts.delta_frames != 0;
+
+ receive_stats_filled_["CName"] |= !stats.c_name.empty();
+
+ receive_stats_filled_["RtcpPacketTypeCount"] |=
+ stats.rtcp_packet_type_counts.fir_packets != 0 ||
+ stats.rtcp_packet_type_counts.nack_packets != 0 ||
+ stats.rtcp_packet_type_counts.pli_packets != 0 ||
+ stats.rtcp_packet_type_counts.nack_requests != 0 ||
+ stats.rtcp_packet_type_counts.unique_nack_requests != 0;
+
+ RTC_DCHECK(stats.current_payload_type == -1 ||
+ stats.current_payload_type == kFakeVideoSendPayloadType);
+ receive_stats_filled_["IncomingPayloadType"] |=
+ stats.current_payload_type == kFakeVideoSendPayloadType;
+ }
+
+ return AllStatsFilled(receive_stats_filled_);
+ }
+
+ bool CheckSendStats() {
+ RTC_DCHECK(send_stream_);
+
+ VideoSendStream::Stats stats;
+ SendTask(task_queue_, [&]() { stats = send_stream_->GetStats(); });
+
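+      // Substreams include one entry per media SSRC plus one per RTX SSRC.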
+ size_t expected_num_streams =
+ kNumSimulcastStreams + expected_send_ssrcs_.size();
+ send_stats_filled_["NumStreams"] |=
+ stats.substreams.size() == expected_num_streams;
+
+ send_stats_filled_["CpuOveruseMetrics"] |=
+ stats.avg_encode_time_ms != 0 && stats.encode_usage_percent != 0 &&
+ stats.total_encode_time_ms != 0;
+
+ send_stats_filled_["EncoderImplementationName"] |=
+ stats.encoder_implementation_name ==
+ test::FakeEncoder::kImplementationName;
+
+ send_stats_filled_["PowerEfficientEncoder"] |=
+ stats.power_efficient_encoder == true;
+
+ for (const auto& kv : stats.substreams) {
+ if (expected_send_ssrcs_.find(kv.first) == expected_send_ssrcs_.end())
+ continue; // Probably RTX.
+
+ send_stats_filled_[CompoundKey("CapturedFrameRate", kv.first)] |=
+ stats.input_frame_rate != 0;
+
+ const VideoSendStream::StreamStats& stream_stats = kv.second;
+
+ send_stats_filled_[CompoundKey("StatisticsUpdated", kv.first)] |=
+ stream_stats.report_block_data.has_value();
+
+ send_stats_filled_[CompoundKey("DataCountersUpdated", kv.first)] |=
+ stream_stats.rtp_stats.fec.packets != 0 ||
+ stream_stats.rtp_stats.transmitted.padding_bytes != 0 ||
+ stream_stats.rtp_stats.retransmitted.packets != 0 ||
+ stream_stats.rtp_stats.transmitted.packets != 0;
+
+ send_stats_filled_[CompoundKey("BitrateStatisticsObserver.Total",
+ kv.first)] |=
+ stream_stats.total_bitrate_bps != 0;
+
+ send_stats_filled_[CompoundKey("BitrateStatisticsObserver.Retransmit",
+ kv.first)] |=
+ stream_stats.retransmit_bitrate_bps != 0;
+
+ send_stats_filled_[CompoundKey("FrameCountObserver", kv.first)] |=
+ stream_stats.frame_counts.delta_frames != 0 ||
+ stream_stats.frame_counts.key_frames != 0;
+
+ send_stats_filled_[CompoundKey("OutgoingRate", kv.first)] |=
+ stats.encode_frame_rate != 0;
+
+ send_stats_filled_[CompoundKey("Delay", kv.first)] |=
+ stream_stats.avg_delay_ms != 0 || stream_stats.max_delay_ms != 0;
+
+ // TODO(pbos): Use CompoundKey when the test makes sure that all SSRCs
+ // report dropped packets.
+ send_stats_filled_["RtcpPacketTypeCount"] |=
+ stream_stats.rtcp_packet_type_counts.fir_packets != 0 ||
+ stream_stats.rtcp_packet_type_counts.nack_packets != 0 ||
+ stream_stats.rtcp_packet_type_counts.pli_packets != 0 ||
+ stream_stats.rtcp_packet_type_counts.nack_requests != 0 ||
+ stream_stats.rtcp_packet_type_counts.unique_nack_requests != 0;
+ }
+
+ return AllStatsFilled(send_stats_filled_);
+ }
+
+ std::string CompoundKey(const char* name, uint32_t ssrc) {
+ rtc::StringBuilder oss;
+ oss << name << "_" << ssrc;
+ return oss.Release();
+ }
+
+ bool AllStatsFilled(const std::map<std::string, bool>& stats_map) {
+ for (const auto& stat : stats_map) {
+ if (!stat.second)
+ return false;
+ }
+ return true;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ BuiltInNetworkBehaviorConfig network_config;
+ network_config.loss_percent = 5;
+ return network_config;
+ }
+
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ bitrate_config->start_bitrate_bps = kStartBitrateBps;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ encoder_config->max_bitrate_bps = 50000;
+ for (auto& layer : encoder_config->simulcast_layers) {
+ layer.min_bitrate_bps = 10000;
+ layer.target_bitrate_bps = 15000;
+ layer.max_bitrate_bps = 20000;
+ }
+
+ send_config->rtp.c_name = "SomeCName";
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+
+ const std::vector<uint32_t>& ssrcs = send_config->rtp.ssrcs;
+ for (size_t i = 0; i < ssrcs.size(); ++i) {
+ expected_send_ssrcs_.insert(ssrcs[i]);
+ expected_receive_ssrcs_.push_back(
+ (*receive_configs)[i].rtp.remote_ssrc);
+ (*receive_configs)[i].render_delay_ms = kExpectedRenderDelayMs;
+ (*receive_configs)[i].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+
+ (*receive_configs)[i].rtp.rtx_ssrc = kSendRtxSsrcs[i];
+ (*receive_configs)[i]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
+ kFakeVideoSendPayloadType;
+ }
+
+ for (size_t i = 0; i < kNumSimulcastStreams; ++i)
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+
+ // Use a delayed encoder to make sure we see CpuOveruseMetrics stats that
+ // are non-zero.
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ }
+
+ size_t GetNumVideoStreams() const override { return kNumSimulcastStreams; }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ receive_streams_ = receive_streams;
+ task_queue_ = TaskQueueBase::Current();
+ }
+
+ void PerformTest() override {
+ Clock* clock = Clock::GetRealTimeClock();
+ int64_t now_ms = clock->TimeInMilliseconds();
+ int64_t stop_time_ms = now_ms + test::CallTest::kLongTimeout.ms();
+ bool receive_ok = false;
+ bool send_ok = false;
+
+ while (now_ms < stop_time_ms) {
+ if (!receive_ok && task_queue_) {
+ SendTask(task_queue_, [&]() { receive_ok = CheckReceiveStats(); });
+ }
+ if (!send_ok)
+ send_ok = CheckSendStats();
+
+ if (receive_ok && send_ok)
+ return;
+
+ int64_t time_until_timeout_ms = stop_time_ms - now_ms;
+ if (time_until_timeout_ms > 0)
+ check_stats_event_.Wait(TimeDelta::Millis(time_until_timeout_ms));
+ now_ms = clock->TimeInMilliseconds();
+ }
+
+ ADD_FAILURE() << "Timed out waiting for filled stats.";
+ for (const auto& kv : receive_stats_filled_) {
+ if (!kv.second) {
+ ADD_FAILURE() << "Missing receive stats: " << kv.first;
+ }
+ }
+ for (const auto& kv : send_stats_filled_) {
+ if (!kv.second) {
+ ADD_FAILURE() << "Missing send stats: " << kv.first;
+ }
+ }
+ }
+
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ std::vector<VideoReceiveStreamInterface*> receive_streams_;
+ std::map<std::string, bool> receive_stats_filled_;
+
+ VideoSendStream* send_stream_ = nullptr;
+ std::map<std::string, bool> send_stats_filled_;
+
+ std::vector<uint32_t> expected_receive_ssrcs_;
+ std::set<uint32_t> expected_send_ssrcs_;
+
+ rtc::Event check_stats_event_;
+ TaskQueueBase* task_queue_ = nullptr;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(StatsEndToEndTest, TimingFramesAreReported) {
+ static const int kExtensionId = 5;
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
+
+ class StatsObserver : public test::EndToEndTest {
+ public:
+ StatsObserver() : EndToEndTest(kLongTimeout) {}
+
+ private:
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
+ for (auto& receive_config : *receive_configs) {
+ receive_config.rtp.extensions.clear();
+ receive_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
+ }
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ receive_streams_ = receive_streams;
+ task_queue_ = TaskQueueBase::Current();
+ }
+
+ void PerformTest() override {
+ // No frames reported initially.
+ SendTask(task_queue_, [&]() {
+ for (const auto& receive_stream : receive_streams_) {
+ EXPECT_FALSE(receive_stream->GetStats().timing_frame_info);
+ }
+ });
+ // Wait for at least one timing frame to be sent with 100ms grace period.
+ SleepMs(kDefaultTimingFramesDelayMs + 100);
+ // Check that timing frames are reported for each stream.
+ SendTask(task_queue_, [&]() {
+ for (const auto& receive_stream : receive_streams_) {
+ EXPECT_TRUE(receive_stream->GetStats().timing_frame_info);
+ }
+ });
+ }
+
+ std::vector<VideoReceiveStreamInterface*> receive_streams_;
+ TaskQueueBase* task_queue_ = nullptr;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(StatsEndToEndTest, TestReceivedRtpPacketStats) {
+ static const size_t kNumRtpPacketsToSend = 5;
+ class ReceivedRtpStatsObserver : public test::EndToEndTest {
+ public:
+ explicit ReceivedRtpStatsObserver(TaskQueueBase* task_queue)
+ : EndToEndTest(kDefaultTimeout), task_queue_(task_queue) {}
+
+ private:
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ receive_stream_ = receive_streams[0];
+ }
+
+ void OnStreamsStopped() override { task_safety_flag_->SetNotAlive(); }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (sent_rtp_ >= kNumRtpPacketsToSend) {
+ // Need to check the stats on the correct thread.
+ task_queue_->PostTask(SafeTask(task_safety_flag_, [this]() {
+ VideoReceiveStreamInterface::Stats stats =
+ receive_stream_->GetStats();
+ if (kNumRtpPacketsToSend == stats.rtp_stats.packet_counter.packets) {
+ observation_complete_.Set();
+ }
+ }));
+ return DROP_PACKET;
+ }
+ ++sent_rtp_;
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while verifying number of received RTP packets.";
+ }
+
+ VideoReceiveStreamInterface* receive_stream_ = nullptr;
+ uint32_t sent_rtp_ = 0;
+ TaskQueueBase* const task_queue_;
+ rtc::scoped_refptr<PendingTaskSafetyFlag> task_safety_flag_ =
+ PendingTaskSafetyFlag::CreateDetached();
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+#if defined(WEBRTC_WIN)
+// Disabled due to flakiness on Windows (bugs.webrtc.org/7483).
+#define MAYBE_ContentTypeSwitches DISABLED_ContentTypeSwitches
+#else
+#define MAYBE_ContentTypeSwitches ContentTypeSwitches
+#endif
+TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) {
+ class StatsObserver : public test::BaseTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ StatsObserver() : BaseTest(kLongTimeout), num_frames_received_(0) {}
+
+ bool ShouldCreateReceivers() const override { return true; }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+      // The RTT is needed to estimate `ntp_time_ms`, which is used by the
+      // end-to-end delay stats. Therefore, start counting received frames
+      // only once `ntp_time_ms` is valid.
+ if (video_frame.ntp_time_ms() > 0 &&
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
+ video_frame.ntp_time_ms()) {
+ MutexLock lock(&mutex_);
+ ++num_frames_received_;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (MinNumberOfFramesReceived())
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ bool MinNumberOfFramesReceived() const {
+      // Leave some room for frames with the wrong content type during the
+      // switch.
+ const int kMinRequiredHistogramSamples = 200 + 50;
+ MutexLock lock(&mutex_);
+ return num_frames_received_ > kMinRequiredHistogramSamples;
+ }
+
+ // May be called several times.
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for enough packets.";
+ // Reset frame counter so next PerformTest() call will do something.
+ {
+ MutexLock lock(&mutex_);
+ num_frames_received_ = 0;
+ }
+ }
+
+ mutable Mutex mutex_;
+ int num_frames_received_ RTC_GUARDED_BY(&mutex_);
+ } test;
+
+ metrics::Reset();
+
+ Call::Config send_config(send_event_log_.get());
+ test.ModifySenderBitrateConfig(&send_config.bitrate_config);
+ Call::Config recv_config(recv_event_log_.get());
+ test.ModifyReceiverBitrateConfig(&recv_config.bitrate_config);
+
+ VideoEncoderConfig encoder_config_with_screenshare;
+
+ SendTask(
+ task_queue(), [this, &test, &send_config, &recv_config,
+ &encoder_config_with_screenshare]() {
+ CreateSenderCall(send_config);
+ CreateReceiverCall(recv_config);
+ CreateReceiveTransport(test.GetReceiveTransportConfig(), &test);
+        CreateSendTransport(test.GetSendTransportConfig(), &test);
+
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ CreateSendConfig(1, 0, 0);
+ CreateMatchingReceiveConfigs();
+
+ // Modify send and receive configs.
+ GetVideoSendConfig()->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[0].renderer = &test;
+ // RTT needed for RemoteNtpTimeEstimator for the receive stream.
+ video_receive_configs_[0].rtp.rtcp_xr.receiver_reference_time_report =
+ true;
+ // Start with realtime video.
+ GetVideoEncoderConfig()->content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ // Encoder config for the second part of the test uses screenshare.
+ encoder_config_with_screenshare = GetVideoEncoderConfig()->Copy();
+ encoder_config_with_screenshare.content_type =
+ VideoEncoderConfig::ContentType::kScreen;
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+ });
+
+ test.PerformTest();
+
+ // Replace old send stream.
+ SendTask(task_queue(), [this, &encoder_config_with_screenshare]() {
+ DestroyVideoSendStreams();
+ CreateVideoSendStream(encoder_config_with_screenshare);
+ SetVideoDegradation(DegradationPreference::BALANCED);
+ GetVideoSendStream()->Start();
+ });
+
+ // Continue to run test but now with screenshare.
+ test.PerformTest();
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+
+ // Verify that stats have been updated for both screenshare and video.
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+}
+
+TEST_F(StatsEndToEndTest, VerifyNackStats) {
+ static const int kPacketNumberToDrop = 200;
+ class NackObserver : public test::EndToEndTest {
+ public:
+ explicit NackObserver(TaskQueueBase* task_queue)
+ : EndToEndTest(kLongTimeout), task_queue_(task_queue) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ {
+ MutexLock lock(&mutex_);
+ if (++sent_rtp_packets_ == kPacketNumberToDrop) {
+ RtpPacket header;
+ EXPECT_TRUE(header.Parse(packet, length));
+ dropped_rtp_packet_ = header.SequenceNumber();
+ return DROP_PACKET;
+ }
+ }
+ task_queue_->PostTask(
+ SafeTask(task_safety_flag_, [this]() { VerifyStats(); }));
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ test::RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(packet, length);
+ const std::vector<uint16_t>& nacks = rtcp_parser.nack()->packet_ids();
+ if (!nacks.empty() && absl::c_linear_search(nacks, dropped_rtp_packet_)) {
+ dropped_rtp_packet_requested_ = true;
+ }
+ return SEND_PACKET;
+ }
+
+ void VerifyStats() {
+ MutexLock lock(&mutex_);
+ if (!dropped_rtp_packet_requested_)
+ return;
+ int send_stream_nack_packets = 0;
+ int receive_stream_nack_packets = 0;
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+ for (const auto& kv : stats.substreams) {
+ const VideoSendStream::StreamStats& stream_stats = kv.second;
+ send_stream_nack_packets +=
+ stream_stats.rtcp_packet_type_counts.nack_packets;
+ }
+ for (const auto& receive_stream : receive_streams_) {
+ VideoReceiveStreamInterface::Stats stats = receive_stream->GetStats();
+ receive_stream_nack_packets +=
+ stats.rtcp_packet_type_counts.nack_packets;
+ }
+ if (send_stream_nack_packets >= 1 && receive_stream_nack_packets >= 1) {
+        // A NACK was sent by the receive stream and received by the send
+        // stream.
+ if (MinMetricRunTimePassed())
+ observation_complete_.Set();
+ }
+ }
+
+ bool MinMetricRunTimePassed() {
+ int64_t now_ms = Clock::GetRealTimeClock()->TimeInMilliseconds();
+ if (!start_runtime_ms_)
+ start_runtime_ms_ = now_ms;
+
+ int64_t elapsed_sec = (now_ms - *start_runtime_ms_) / 1000;
+ return elapsed_sec > metrics::kMinRunTimeInSeconds;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ receive_streams_ = receive_streams;
+ }
+
+ void OnStreamsStopped() override { task_safety_flag_->SetNotAlive(); }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for packet to be NACKed.";
+ }
+
+ Mutex mutex_;
+ uint64_t sent_rtp_packets_ RTC_GUARDED_BY(&mutex_) = 0;
+ uint16_t dropped_rtp_packet_ RTC_GUARDED_BY(&mutex_) = 0;
+ bool dropped_rtp_packet_requested_ RTC_GUARDED_BY(&mutex_) = false;
+ std::vector<VideoReceiveStreamInterface*> receive_streams_;
+ VideoSendStream* send_stream_ = nullptr;
+ absl::optional<int64_t> start_runtime_ms_;
+ TaskQueueBase* const task_queue_;
+ rtc::scoped_refptr<PendingTaskSafetyFlag> task_safety_flag_ =
+ PendingTaskSafetyFlag::CreateDetached();
+ } test(task_queue());
+
+ metrics::Reset();
+ RunBaseTest(&test);
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.UniqueNackRequestsSentInPercent"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
+ EXPECT_METRIC_GT(metrics::MinSample("WebRTC.Video.NackPacketsSentPerMinute"),
+ 0);
+}
+
+TEST_F(StatsEndToEndTest, CallReportsRttForSender) {
+ static const int kSendDelayMs = 30;
+ static const int kReceiveDelayMs = 70;
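+  // The reported RTT should be at least the sum of the two one-way queueing
+  // delays configured above.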
+
+ SendTask(task_queue(), [this]() {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_delay_ms = kSendDelayMs;
+ CreateCalls();
+ CreateSendTransport(config, /*observer*/ nullptr);
+
+ config.queue_delay_ms = kReceiveDelayMs;
+ CreateReceiveTransport(config, /*observer*/ nullptr);
+
+ CreateSendConfig(1, 0, 0);
+ CreateMatchingReceiveConfigs();
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ Start();
+ });
+
+ int64_t start_time_ms = clock_->TimeInMilliseconds();
+ while (true) {
+ Call::Stats stats;
+ SendTask(task_queue(),
+ [this, &stats]() { stats = sender_call_->GetStats(); });
+ ASSERT_GE(start_time_ms + kDefaultTimeout.ms(),
+ clock_->TimeInMilliseconds())
+ << "No RTT stats before timeout!";
+ if (stats.rtt_ms != -1) {
+      // To avoid failures caused by rounding or minor NTP clock adjustments,
+      // relax the expectation by 1 ms.
+ constexpr int kAllowedErrorMs = 1;
+ EXPECT_GE(stats.rtt_ms, kSendDelayMs + kReceiveDelayMs - kAllowedErrorMs);
+ break;
+ }
+ SleepMs(10);
+ }
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/end_to_end_tests/transport_feedback_tests.cc b/third_party/libwebrtc/video/end_to_end_tests/transport_feedback_tests.cc
new file mode 100644
index 0000000000..f6e20498e3
--- /dev/null
+++ b/third_party/libwebrtc/video/end_to_end_tests/transport_feedback_tests.cc
@@ -0,0 +1,493 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "api/rtp_parameters.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "call/call.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+#include "video/end_to_end_tests/multi_stream_tester.h"
+
+namespace webrtc {
+namespace {
+enum : int { // The first valid value is 1.
+ kTransportSequenceNumberExtensionId = 1,
+};
+} // namespace
+
+TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
+ static constexpr int kSendRtxPayloadType = 98;
+ static constexpr TimeDelta kDefaultTimeout = TimeDelta::Seconds(30);
+ static constexpr int kNackRtpHistoryMs = 1000;
+ static constexpr uint32_t kSendRtxSsrcs[MultiStreamTester::kNumStreams] = {
+ 0xBADCAFD, 0xBADCAFE, 0xBADCAFF};
+
+ class RtpExtensionHeaderObserver : public test::DirectTransport {
+ public:
+ RtpExtensionHeaderObserver(
+ TaskQueueBase* task_queue,
+ Call* sender_call,
+ const std::map<uint32_t, uint32_t>& ssrc_map,
+ const std::map<uint8_t, MediaType>& payload_type_map,
+ rtc::ArrayView<const RtpExtension> audio_extensions,
+ rtc::ArrayView<const RtpExtension> video_extensions)
+ : DirectTransport(task_queue,
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(),
+ std::make_unique<SimulatedNetwork>(
+ BuiltInNetworkBehaviorConfig())),
+ sender_call,
+ payload_type_map,
+ audio_extensions,
+ video_extensions),
+ rtx_to_media_ssrcs_(ssrc_map),
+ rtx_padding_observed_(false),
+ retransmit_observed_(false),
+ started_(false) {
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
+ }
+ virtual ~RtpExtensionHeaderObserver() {}
+
+ bool SendRtp(const uint8_t* data,
+ size_t length,
+ const PacketOptions& options) override {
+ {
+ MutexLock lock(&lock_);
+
+ if (IsDone())
+ return false;
+
+ if (started_) {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(data, length));
+ bool drop_packet = false;
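+          // The transport-wide sequence number written into the header
+          // extension must match the packet id reported via PacketOptions.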
+
+ uint16_t transport_sequence_number = 0;
+ EXPECT_TRUE(rtp_packet.GetExtension<TransportSequenceNumber>(
+ &transport_sequence_number));
+ EXPECT_EQ(options.packet_id, transport_sequence_number);
+ if (!streams_observed_.empty()) {
+ // Unwrap packet id and verify uniqueness.
+ int64_t packet_id = unwrapper_.Unwrap(options.packet_id);
+ EXPECT_TRUE(received_packed_ids_.insert(packet_id).second);
+ }
+
+ // Drop (up to) every 17th packet, so we get retransmits.
+ // Only drop media, do not drop padding packets.
+ if (rtp_packet.PayloadType() != kSendRtxPayloadType &&
+ rtp_packet.payload_size() > 0 &&
+ transport_sequence_number % 17 == 0) {
+ dropped_seq_[rtp_packet.Ssrc()].insert(rtp_packet.SequenceNumber());
+ drop_packet = true;
+ }
+
+ if (rtp_packet.payload_size() == 0) {
+ // Ignore padding packets.
+ } else if (rtp_packet.PayloadType() == kSendRtxPayloadType) {
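+            // Per RFC 4588, the RTX payload starts with the two-byte
+            // original sequence number (OSN) of the retransmitted packet.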
+ uint16_t original_sequence_number =
+ ByteReader<uint16_t>::ReadBigEndian(
+ rtp_packet.payload().data());
+ uint32_t original_ssrc =
+ rtx_to_media_ssrcs_.find(rtp_packet.Ssrc())->second;
+ std::set<uint16_t>* seq_no_map = &dropped_seq_[original_ssrc];
+ auto it = seq_no_map->find(original_sequence_number);
+ if (it != seq_no_map->end()) {
+ retransmit_observed_ = true;
+ seq_no_map->erase(it);
+ } else {
+ rtx_padding_observed_ = true;
+ }
+ } else {
+ streams_observed_.insert(rtp_packet.Ssrc());
+ }
+
+ if (IsDone())
+ done_.Set();
+
+ if (drop_packet)
+ return true;
+ }
+ }
+
+ return test::DirectTransport::SendRtp(data, length, options);
+ }
+
+ bool IsDone() {
+ bool observed_types_ok =
+ streams_observed_.size() == MultiStreamTester::kNumStreams &&
+ retransmit_observed_ && rtx_padding_observed_;
+ if (!observed_types_ok)
+ return false;
+ // We should not have any gaps in the sequence number range.
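+      // If the observed range [min, max] is exactly as large as the set,
+      // every transport sequence number in it was seen exactly once.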
+ size_t seqno_range =
+ *received_packed_ids_.rbegin() - *received_packed_ids_.begin() + 1;
+ return seqno_range == received_packed_ids_.size();
+ }
+
+ bool Wait() {
+ {
+      // Not until this point can we be sure that rtx_to_media_ssrcs_ etc.
+      // have been initialized and are safe to read.
+ MutexLock lock(&lock_);
+ started_ = true;
+ }
+ return done_.Wait(kDefaultTimeout);
+ }
+
+ private:
+ Mutex lock_;
+ rtc::Event done_;
+ RtpHeaderExtensionMap extensions_;
+ RtpSequenceNumberUnwrapper unwrapper_;
+    std::set<int64_t> received_packet_ids_;
+ std::set<uint32_t> streams_observed_;
+ std::map<uint32_t, std::set<uint16_t>> dropped_seq_;
+ const std::map<uint32_t, uint32_t>& rtx_to_media_ssrcs_;
+ bool rtx_padding_observed_;
+ bool retransmit_observed_;
+ bool started_;
+ };
+
+ class TransportSequenceNumberTester : public MultiStreamTester {
+ public:
+ TransportSequenceNumberTester() : observer_(nullptr) {}
+ ~TransportSequenceNumberTester() override = default;
+
+ protected:
+ void Wait() override {
+ RTC_DCHECK(observer_);
+ EXPECT_TRUE(observer_->Wait());
+ }
+
+ void UpdateSendConfig(
+ size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+
+      // Force some padding to be sent. Note that since we do send media
+      // packets we cannot guarantee that a padding-only packet is sent.
+      // Instead, padding will most likely be sent as an RTX packet.
+ const int kPaddingBitrateBps = 50000;
+ encoder_config->max_bitrate_bps = 200000;
+ encoder_config->min_transmit_bitrate_bps =
+ encoder_config->max_bitrate_bps + kPaddingBitrateBps;
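+      // With the values above, the encoder is capped at 200 kbps while the
+      // pacer targets at least 250 kbps, so roughly 50 kbps has to be
+      // produced as padding.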
+
+ // Configure RTX for redundant payload padding.
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[stream_index]);
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+ rtx_to_media_ssrcs_[kSendRtxSsrcs[stream_index]] =
+ send_config->rtp.ssrcs[0];
+ }
+
+ void UpdateReceiveConfig(
+ size_t stream_index,
+ VideoReceiveStreamInterface::Config* receive_config) override {
+ receive_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ receive_config->rtp.extensions.clear();
+ receive_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ receive_config->renderer = &fake_renderer_;
+ }
+
+ std::unique_ptr<test::DirectTransport> CreateSendTransport(
+ TaskQueueBase* task_queue,
+ Call* sender_call) override {
+ std::map<uint8_t, MediaType> payload_type_map =
+ MultiStreamTester::payload_type_map_;
+ RTC_DCHECK(payload_type_map.find(kSendRtxPayloadType) ==
+ payload_type_map.end());
+ payload_type_map[kSendRtxPayloadType] = MediaType::VIDEO;
+ std::vector<RtpExtension> extensions = {
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId)};
+ auto observer = std::make_unique<RtpExtensionHeaderObserver>(
+ task_queue, sender_call, rtx_to_media_ssrcs_, payload_type_map,
+ extensions, extensions);
+ observer_ = observer.get();
+ return observer;
+ }
+
+ private:
+ test::FakeVideoRenderer fake_renderer_;
+ std::map<uint32_t, uint32_t> rtx_to_media_ssrcs_;
+ RtpExtensionHeaderObserver* observer_;
+ } tester;
+
+ tester.RunTest();
+}
+
+class TransportFeedbackEndToEndTest : public test::CallTest {
+ public:
+ TransportFeedbackEndToEndTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ }
+};
+
+class TransportFeedbackTester : public test::EndToEndTest {
+ public:
+ TransportFeedbackTester(size_t num_video_streams, size_t num_audio_streams)
+ : EndToEndTest(::webrtc::TransportFeedbackEndToEndTest::kDefaultTimeout),
+ num_video_streams_(num_video_streams),
+ num_audio_streams_(num_audio_streams),
+ receiver_call_(nullptr) {
+ // Only one stream of each supported for now.
+ EXPECT_LE(num_video_streams, 1u);
+ EXPECT_LE(num_audio_streams, 1u);
+ }
+
+ protected:
+ Action OnSendRtcp(const uint8_t* data, size_t length) override {
+ EXPECT_FALSE(HasTransportFeedback(data, length));
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
+ if (HasTransportFeedback(data, length))
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ bool HasTransportFeedback(const uint8_t* data, size_t length) const {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(data, length));
+ return parser.transport_feedback()->num_packets() > 0;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(observation_complete_.Wait(test::CallTest::kDefaultTimeout));
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ receiver_call_ = receiver_call;
+ }
+
+ size_t GetNumVideoStreams() const override { return num_video_streams_; }
+ size_t GetNumAudioStreams() const override { return num_audio_streams_; }
+
+ void ModifyAudioConfigs(AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStreamInterface::Config>*
+ receive_configs) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ (*receive_configs)[0].rtp.extensions.clear();
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ }
+
+ private:
+ const size_t num_video_streams_;
+ const size_t num_audio_streams_;
+ Call* receiver_call_;
+};
+
+TEST_F(TransportFeedbackEndToEndTest, VideoReceivesTransportFeedback) {
+ TransportFeedbackTester test(1, 0);
+ RunBaseTest(&test);
+}
+TEST_F(TransportFeedbackEndToEndTest, AudioReceivesTransportFeedback) {
+ TransportFeedbackTester test(0, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(TransportFeedbackEndToEndTest, AudioVideoReceivesTransportFeedback) {
+ TransportFeedbackTester test(1, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(TransportFeedbackEndToEndTest,
+ StopsAndResumesMediaWhenCongestionWindowFull) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-CongestionWindow/QueueSize:250/");
+
+ class TransportFeedbackTester : public test::EndToEndTest {
+ public:
+ TransportFeedbackTester(size_t num_video_streams, size_t num_audio_streams)
+ : EndToEndTest(
+ ::webrtc::TransportFeedbackEndToEndTest::kDefaultTimeout),
+ num_video_streams_(num_video_streams),
+ num_audio_streams_(num_audio_streams),
+ media_sent_(0),
+ media_sent_before_(0),
+ padding_sent_(0) {
+ // Only one stream of each supported for now.
+ EXPECT_LE(num_video_streams, 1u);
+ EXPECT_LE(num_audio_streams, 1u);
+ }
+
+ protected:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ const bool only_padding = rtp_packet.payload_size() == 0;
+ MutexLock lock(&mutex_);
+      // Padding is expected in the congested state, to probe for connectivity
+      // when packets have been dropped.
+ if (only_padding) {
+ media_sent_before_ = media_sent_;
+ ++padding_sent_;
+ } else {
+ ++media_sent_;
+ if (padding_sent_ == 0) {
+ ++media_sent_before_;
+ EXPECT_LT(media_sent_, 40)
+ << "Media sent without feedback when congestion window is full.";
+ } else if (media_sent_ > media_sent_before_) {
+ observation_complete_.Set();
+ }
+ }
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
+ MutexLock lock(&mutex_);
+      // To fill up the congestion window we drop feedback on packets after 20
+      // packets have been sent. Any packets that have not yet received
+      // feedback after that point are considered outstanding data and
+      // therefore fill up the congestion window. In the congested state, the
+      // pacer should send padding packets to trigger feedback in case all
+      // feedback of previous traffic was lost. This test listens for the
+      // padding packets and, once 2 padding packets have been received, lets
+      // feedback through again. This should cause the pacer to resume sending
+      // media.
+ if (media_sent_ > 20 && HasTransportFeedback(data, length) &&
+ padding_sent_ < 2) {
+ return DROP_PACKET;
+ }
+ return SEND_PACKET;
+ }
+
+ bool HasTransportFeedback(const uint8_t* data, size_t length) const {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(data, length));
+ return parser.transport_feedback()->num_packets() > 0;
+ }
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ bitrate_config->max_bitrate_bps = 300000;
+ }
+
+ void PerformTest() override {
+ constexpr TimeDelta kFailureTimeout = TimeDelta::Seconds(10);
+ EXPECT_TRUE(observation_complete_.Wait(kFailureTimeout))
+ << "Stream not continued after congestion window full.";
+ }
+
+ size_t GetNumVideoStreams() const override { return num_video_streams_; }
+ size_t GetNumAudioStreams() const override { return num_audio_streams_; }
+
+ private:
+ const size_t num_video_streams_;
+ const size_t num_audio_streams_;
+ Mutex mutex_;
+ int media_sent_ RTC_GUARDED_BY(mutex_);
+ int media_sent_before_ RTC_GUARDED_BY(mutex_);
+ int padding_sent_ RTC_GUARDED_BY(mutex_);
+ } test(1, 0);
+ RunBaseTest(&test);
+}
+
+TEST_F(TransportFeedbackEndToEndTest, TransportSeqNumOnAudioAndVideo) {
+ static constexpr size_t kMinPacketsToWaitFor = 50;
+ class TransportSequenceNumberTest : public test::EndToEndTest {
+ public:
+ TransportSequenceNumberTest()
+ : EndToEndTest(kDefaultTimeout),
+ video_observed_(false),
+ audio_observed_(false) {
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
+ }
+
+ size_t GetNumVideoStreams() const override { return 1; }
+ size_t GetNumAudioStreams() const override { return 1; }
+
+ void ModifyAudioConfigs(AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStreamInterface::Config>*
+ receive_configs) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ (*receive_configs)[0].rtp.extensions.clear();
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ uint16_t transport_sequence_number = 0;
+ EXPECT_TRUE(rtp_packet.GetExtension<TransportSequenceNumber>(
+ &transport_sequence_number));
+ // Unwrap packet id and verify uniqueness.
+ int64_t packet_id = unwrapper_.Unwrap(transport_sequence_number);
+ EXPECT_TRUE(received_packet_ids_.insert(packet_id).second);
+
+ if (rtp_packet.Ssrc() == kVideoSendSsrcs[0])
+ video_observed_ = true;
+ if (rtp_packet.Ssrc() == kAudioSendSsrc)
+ audio_observed_ = true;
+ if (audio_observed_ && video_observed_ &&
+ received_packet_ids_.size() >= kMinPacketsToWaitFor) {
+ size_t packet_id_range =
+ *received_packet_ids_.rbegin() - *received_packet_ids_.begin() + 1;
+ EXPECT_EQ(received_packet_ids_.size(), packet_id_range);
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for audio and video "
+ "packets with transport sequence number.";
+ }
+
+ void ExpectSuccessful() {
+ EXPECT_TRUE(video_observed_);
+ EXPECT_TRUE(audio_observed_);
+ EXPECT_GE(received_packet_ids_.size(), kMinPacketsToWaitFor);
+ }
+
+ private:
+ bool video_observed_;
+ bool audio_observed_;
+ RtpSequenceNumberUnwrapper unwrapper_;
+ std::set<int64_t> received_packet_ids_;
+ RtpHeaderExtensionMap extensions_;
+ } test;
+
+ RunBaseTest(&test);
+  // Double-check the conditions for a successful test to produce a better
+  // error message when the test fails.
+ test.ExpectSuccessful();
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/frame_cadence_adapter.cc b/third_party/libwebrtc/video/frame_cadence_adapter.cc
new file mode 100644
index 0000000000..efffa9672a
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_cadence_adapter.cc
@@ -0,0 +1,803 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_cadence_adapter.h"
+
+#include <atomic>
+#include <deque>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/attributes.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+namespace {
+
+// Abstracts concrete modes of the cadence adapter.
+class AdapterMode {
+ public:
+ virtual ~AdapterMode() = default;
+
+ // Called on the worker thread for every frame that enters.
+ virtual void OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) = 0;
+
+ // Returns the currently estimated input framerate.
+ virtual absl::optional<uint32_t> GetInputFrameRateFps() = 0;
+
+ // Updates the frame rate.
+ virtual void UpdateFrameRate() = 0;
+};
+
+// Implements a pass-through adapter. Single-threaded.
+class PassthroughAdapterMode : public AdapterMode {
+ public:
+ PassthroughAdapterMode(Clock* clock,
+ FrameCadenceAdapterInterface::Callback* callback)
+ : clock_(clock), callback_(callback) {
+ sequence_checker_.Detach();
+ }
+
+ // Adapter overrides.
+ void OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) override {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ callback_->OnFrame(post_time, frames_scheduled_for_processing, frame);
+ }
+
+ absl::optional<uint32_t> GetInputFrameRateFps() override {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return input_framerate_.Rate(clock_->TimeInMilliseconds());
+ }
+
+ void UpdateFrameRate() override {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ input_framerate_.Update(1, clock_->TimeInMilliseconds());
+ }
+
+ private:
+ Clock* const clock_;
+ FrameCadenceAdapterInterface::Callback* const callback_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+ // Input frame rate statistics for use when not in zero-hertz mode.
+ RateStatistics input_framerate_ RTC_GUARDED_BY(sequence_checker_){
+ FrameCadenceAdapterInterface::kFrameRateAveragingWindowSizeMs, 1000};
+};
+
+// Implements a frame cadence adapter supporting zero-hertz input.
+class ZeroHertzAdapterMode : public AdapterMode {
+ public:
+ ZeroHertzAdapterMode(TaskQueueBase* queue,
+ Clock* clock,
+ FrameCadenceAdapterInterface::Callback* callback,
+ double max_fps);
+
+ // Reconfigures according to parameters.
+ // All spatial layer trackers are initialized as unconverged by this method.
+ void ReconfigureParameters(
+ const FrameCadenceAdapterInterface::ZeroHertzModeParams& params);
+
+ // Updates spatial layer quality convergence status.
+ void UpdateLayerQualityConvergence(size_t spatial_index,
+ bool quality_converged);
+
+ // Updates spatial layer enabled status.
+ void UpdateLayerStatus(size_t spatial_index, bool enabled);
+
+ // Adapter overrides.
+ void OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) override;
+ absl::optional<uint32_t> GetInputFrameRateFps() override;
+ void UpdateFrameRate() override {}
+
+ // Notified on dropped frames.
+ void OnDiscardedFrame();
+
+ // Conditionally requests a refresh frame via
+ // Callback::RequestRefreshFrame.
+ void ProcessKeyFrameRequest();
+
+ private:
+ // The tracking state of each spatial layer. Used for determining when to
+ // stop repeating frames.
+ struct SpatialLayerTracker {
+ // If unset, the layer is disabled. Otherwise carries the quality
+ // convergence status of the layer.
+ absl::optional<bool> quality_converged;
+ };
+ // The state of a scheduled repeat.
+ struct ScheduledRepeat {
+ ScheduledRepeat(Timestamp origin,
+ int64_t origin_timestamp_us,
+ int64_t origin_ntp_time_ms)
+ : scheduled(origin),
+ idle(false),
+ origin(origin),
+ origin_timestamp_us(origin_timestamp_us),
+ origin_ntp_time_ms(origin_ntp_time_ms) {}
+ // The instant when the repeat was scheduled.
+ Timestamp scheduled;
+ // True if the repeat was scheduled as an idle repeat (long), false
+ // otherwise.
+ bool idle;
+ // The moment we decided to start repeating.
+ Timestamp origin;
+ // The timestamp_us of the frame when we started repeating.
+ int64_t origin_timestamp_us;
+    // The ntp_time_ms of the frame when we started repeating.
+ int64_t origin_ntp_time_ms;
+ };
+
+ // Returns true if all spatial layers can be considered to be converged in
+ // terms of quality.
+ // Convergence means QP has dropped to a low-enough level to warrant ceasing
+ // to send identical frames at high frequency.
+ bool HasQualityConverged() const RTC_RUN_ON(sequence_checker_);
+ // Resets quality convergence information. HasQualityConverged() returns false
+ // after this call.
+ void ResetQualityConvergenceInfo() RTC_RUN_ON(sequence_checker_);
+ // Processes incoming frames on a delayed cadence.
+ void ProcessOnDelayedCadence() RTC_RUN_ON(sequence_checker_);
+ // Schedules a later repeat with delay depending on state of layer trackers.
+  // If `idle_repeat` is true, the repeat is scheduled after
+  // kZeroHertzIdleRepeatRatePeriod. Otherwise it is scheduled after
+  // `frame_delay_`.
+ void ScheduleRepeat(int frame_id, bool idle_repeat)
+ RTC_RUN_ON(sequence_checker_);
+  // Repeats a frame in the absence of incoming frames. Slows down when
+  // quality convergence is attained, and stops repeating altogether when new
+  // frames arrive.
+ void ProcessRepeatedFrameOnDelayedCadence(int frame_id)
+ RTC_RUN_ON(sequence_checker_);
+ // Sends a frame, updating the timestamp to the current time.
+ void SendFrameNow(const VideoFrame& frame) const
+ RTC_RUN_ON(sequence_checker_);
+ // Returns the repeat duration depending on if it's an idle repeat or not.
+ TimeDelta RepeatDuration(bool idle_repeat) const
+ RTC_RUN_ON(sequence_checker_);
+  // Unless the timer is already running, starts repeatedly requesting refresh
+  // frames after a grace period. If a frame appears before the grace period
+  // has passed, the request is cancelled.
+ void MaybeStartRefreshFrameRequester() RTC_RUN_ON(sequence_checker_);
+
+ TaskQueueBase* const queue_;
+ Clock* const clock_;
+ FrameCadenceAdapterInterface::Callback* const callback_;
+
+ // The configured max_fps.
+ // TODO(crbug.com/1255737): support max_fps updates.
+ const double max_fps_;
+ // How much the incoming frame sequence is delayed by.
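+  // E.g. a max_fps of 30 yields a frame_delay_ of ~33 ms.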
+ const TimeDelta frame_delay_ = TimeDelta::Seconds(1) / max_fps_;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+ // A queue of incoming frames and repeated frames.
+ std::deque<VideoFrame> queued_frames_ RTC_GUARDED_BY(sequence_checker_);
+  // The current frame ID to use when starting to repeat frames. This is used
+  // for cancelling pending deferred repeated-frame processing.
+ int current_frame_id_ RTC_GUARDED_BY(sequence_checker_) = 0;
+ // Has content when we are repeating frames.
+ absl::optional<ScheduledRepeat> scheduled_repeat_
+ RTC_GUARDED_BY(sequence_checker_);
+ // Convergent state of each of the configured simulcast layers.
+ std::vector<SpatialLayerTracker> layer_trackers_
+ RTC_GUARDED_BY(sequence_checker_);
+  // Repeating task handle used for requesting refresh frames until frames
+  // arrive, as they can be dropped in various places in the capture pipeline.
+ RepeatingTaskHandle refresh_frame_requester_
+ RTC_GUARDED_BY(sequence_checker_);
+
+ ScopedTaskSafety safety_;
+};
+
+class FrameCadenceAdapterImpl : public FrameCadenceAdapterInterface {
+ public:
+ FrameCadenceAdapterImpl(Clock* clock,
+ TaskQueueBase* queue,
+ const FieldTrialsView& field_trials);
+ ~FrameCadenceAdapterImpl();
+
+ // FrameCadenceAdapterInterface overrides.
+ void Initialize(Callback* callback) override;
+ void SetZeroHertzModeEnabled(
+ absl::optional<ZeroHertzModeParams> params) override;
+ absl::optional<uint32_t> GetInputFrameRateFps() override;
+ void UpdateFrameRate() override;
+ void UpdateLayerQualityConvergence(size_t spatial_index,
+ bool quality_converged) override;
+ void UpdateLayerStatus(size_t spatial_index, bool enabled) override;
+ void ProcessKeyFrameRequest() override;
+
+ // VideoFrameSink overrides.
+ void OnFrame(const VideoFrame& frame) override;
+ void OnDiscardedFrame() override;
+ void OnConstraintsChanged(
+ const VideoTrackSourceConstraints& constraints) override;
+
+ private:
+ // Called from OnFrame in zero-hertz mode.
+ void OnFrameOnMainQueue(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) RTC_RUN_ON(queue_);
+
+  // Returns true under all of the following conditions:
+  // - the constraints' min fps is set to 0,
+  // - the constraints' max fps is set and greater than 0,
+  // - the field trial is enabled, and
+  // - zero-hertz mode is enabled.
+ bool IsZeroHertzScreenshareEnabled() const RTC_RUN_ON(queue_);
+
+ // Handles adapter creation on configuration changes.
+ void MaybeReconfigureAdapters(bool was_zero_hertz_enabled) RTC_RUN_ON(queue_);
+
+ // Called to report on constraint UMAs.
+ void MaybeReportFrameRateConstraintUmas() RTC_RUN_ON(queue_);
+
+ Clock* const clock_;
+ TaskQueueBase* const queue_;
+
+ // True if we support frame entry for screenshare with a minimum frequency of
+ // 0 Hz.
+ const bool zero_hertz_screenshare_enabled_;
+
+ // The two possible modes we're under.
+ absl::optional<PassthroughAdapterMode> passthrough_adapter_;
+ absl::optional<ZeroHertzAdapterMode> zero_hertz_adapter_;
+ // If set, zero-hertz mode has been enabled.
+ absl::optional<ZeroHertzModeParams> zero_hertz_params_;
+ // Cache for the current adapter mode.
+ AdapterMode* current_adapter_mode_ = nullptr;
+
+ // Timestamp for statistics reporting.
+ absl::optional<Timestamp> zero_hertz_adapter_created_timestamp_
+ RTC_GUARDED_BY(queue_);
+
+ // Set up during Initialize.
+ Callback* callback_ = nullptr;
+
+ // The source's constraints.
+ absl::optional<VideoTrackSourceConstraints> source_constraints_
+ RTC_GUARDED_BY(queue_);
+
+  // Race checker for incoming frames. This is the network thread in Chromium,
+  // but may vary in test contexts.
+ rtc::RaceChecker incoming_frame_race_checker_;
+ bool has_reported_screenshare_frame_rate_umas_ RTC_GUARDED_BY(queue_) = false;
+
+ // Number of frames that are currently scheduled for processing on the
+ // `queue_`.
+ std::atomic<int> frames_scheduled_for_processing_{0};
+
+ ScopedTaskSafetyDetached safety_;
+};
+
+ZeroHertzAdapterMode::ZeroHertzAdapterMode(
+ TaskQueueBase* queue,
+ Clock* clock,
+ FrameCadenceAdapterInterface::Callback* callback,
+ double max_fps)
+ : queue_(queue), clock_(clock), callback_(callback), max_fps_(max_fps) {
+ sequence_checker_.Detach();
+ MaybeStartRefreshFrameRequester();
+}
+
+void ZeroHertzAdapterMode::ReconfigureParameters(
+ const FrameCadenceAdapterInterface::ZeroHertzModeParams& params) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_INFO) << __func__ << " this " << this << " num_simulcast_layers "
+ << params.num_simulcast_layers;
+
+ // Start as unconverged.
+ layer_trackers_.clear();
+ layer_trackers_.resize(params.num_simulcast_layers,
+ SpatialLayerTracker{false});
+}
+
+void ZeroHertzAdapterMode::UpdateLayerQualityConvergence(
+ size_t spatial_index,
+ bool quality_converged) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_LOG(LS_INFO) << __func__ << " this " << this << " layer " << spatial_index
+ << " quality has converged: " << quality_converged;
+ if (spatial_index >= layer_trackers_.size())
+ return;
+ if (layer_trackers_[spatial_index].quality_converged.has_value())
+ layer_trackers_[spatial_index].quality_converged = quality_converged;
+}
+
+void ZeroHertzAdapterMode::UpdateLayerStatus(size_t spatial_index,
+ bool enabled) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ if (spatial_index >= layer_trackers_.size())
+ return;
+ if (enabled) {
+ if (!layer_trackers_[spatial_index].quality_converged.has_value()) {
+ // Assume quality has not converged until hearing otherwise.
+ layer_trackers_[spatial_index].quality_converged = false;
+ }
+ } else {
+ layer_trackers_[spatial_index].quality_converged = absl::nullopt;
+ }
+ RTC_LOG(LS_INFO)
+ << __func__ << " this " << this << " layer " << spatial_index
+ << (enabled
+ ? (layer_trackers_[spatial_index].quality_converged.has_value()
+ ? " enabled."
+ : " enabled and it's assumed quality has not converged.")
+ : " disabled.");
+}
+
+void ZeroHertzAdapterMode::OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_VERBOSE) << "ZeroHertzAdapterMode::" << __func__ << " this "
+ << this;
+ refresh_frame_requester_.Stop();
+
+ // Assume all enabled layers are unconverged after frame entry.
+ ResetQualityConvergenceInfo();
+
+ // Remove stored repeating frame if needed.
+ if (scheduled_repeat_.has_value()) {
+ RTC_DCHECK(queued_frames_.size() == 1);
+ RTC_DLOG(LS_VERBOSE) << __func__ << " this " << this
+ << " cancel repeat and restart with original";
+ queued_frames_.pop_front();
+ }
+
+ // Store the frame in the queue and schedule deferred processing.
+ queued_frames_.push_back(frame);
+ current_frame_id_++;
+ scheduled_repeat_ = absl::nullopt;
+ queue_->PostDelayedHighPrecisionTask(
+ SafeTask(safety_.flag(),
+ [this] {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ ProcessOnDelayedCadence();
+ }),
+ frame_delay_);
+}
+
+void ZeroHertzAdapterMode::OnDiscardedFrame() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_VERBOSE) << "ZeroHertzAdapterMode::" << __func__;
+
+  // Under zero-hertz source delivery, a discarded frame ending a sequence of
+  // frames which happened to contain important information can be seen as a
+  // capture freeze. Avoid this by starting to request refresh frames after a
+  // grace period.
+ MaybeStartRefreshFrameRequester();
+}
+
+absl::optional<uint32_t> ZeroHertzAdapterMode::GetInputFrameRateFps() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return max_fps_;
+}
+
+void ZeroHertzAdapterMode::ProcessKeyFrameRequest() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+  // If we're new and don't have a frame, there's no need to request refresh
+  // frames, as one was already requested when zero-hertz mode was set up.
+ //
+ // The next frame encoded will be a key frame. Reset quality convergence so we
+ // don't get idle repeats shortly after, because key frames need a lot of
+ // refinement frames.
+ ResetQualityConvergenceInfo();
+
+ // If we're not repeating, or we're repeating with short duration, we will
+ // very soon send out a frame and don't need a refresh frame.
+ if (!scheduled_repeat_.has_value() || !scheduled_repeat_->idle) {
+ RTC_LOG(LS_INFO) << __func__ << " this " << this
+ << " not requesting refresh frame because of recently "
+ "incoming frame or short repeating.";
+ return;
+ }
+
+ // If the repeat is scheduled within a short (i.e. frame_delay_) interval, we
+ // will very soon send out a frame and don't need a refresh frame.
+ Timestamp now = clock_->CurrentTime();
+ if (scheduled_repeat_->scheduled + RepeatDuration(/*idle_repeat=*/true) -
+ now <=
+ frame_delay_) {
+ RTC_LOG(LS_INFO) << __func__ << " this " << this
+ << " not requesting refresh frame because of soon "
+ "happening idle repeat";
+ return;
+ }
+
+ // Cancel the current repeat and reschedule a short repeat now. No need for a
+ // new refresh frame.
+ RTC_LOG(LS_INFO) << __func__ << " this " << this
+ << " not requesting refresh frame and scheduling a short "
+ "repeat due to key frame request";
+ ScheduleRepeat(++current_frame_id_, /*idle_repeat=*/false);
+ return;
+}
+
+bool ZeroHertzAdapterMode::HasQualityConverged() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+  // 1. With no spatial layers configured, we consider ourselves unconverged.
+  //    This keeps the short repeat interval until the layer configuration
+  //    arrives.
+  // 2. Layers with unset convergence status implicitly count as converged, to
+  //    support disabling layers when they're not needed.
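+  // E.g. trackers {converged, unset} count as converged as a whole below,
+  // while {unconverged, converged} do not.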
+ const bool quality_converged =
+ !layer_trackers_.empty() &&
+ absl::c_all_of(layer_trackers_, [](const SpatialLayerTracker& tracker) {
+ return tracker.quality_converged.value_or(true);
+ });
+ return quality_converged;
+}
+
+void ZeroHertzAdapterMode::ResetQualityConvergenceInfo() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_INFO) << __func__ << " this " << this;
+ for (auto& layer_tracker : layer_trackers_) {
+ if (layer_tracker.quality_converged.has_value())
+ layer_tracker.quality_converged = false;
+ }
+}
+
+void ZeroHertzAdapterMode::ProcessOnDelayedCadence() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(!queued_frames_.empty());
+ RTC_DLOG(LS_VERBOSE) << __func__ << " this " << this;
+
+ SendFrameNow(queued_frames_.front());
+
+ // If there were two or more frames stored, we do not have to schedule repeats
+ // of the front frame.
+ if (queued_frames_.size() > 1) {
+ queued_frames_.pop_front();
+ return;
+ }
+
+ // There's only one frame to send. Schedule a repeat sequence, which is
+ // cancelled by `current_frame_id_` getting incremented should new frames
+ // arrive.
+ ScheduleRepeat(current_frame_id_, HasQualityConverged());
+}
+
+void ZeroHertzAdapterMode::ScheduleRepeat(int frame_id, bool idle_repeat) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_VERBOSE) << __func__ << " this " << this << " frame_id "
+ << frame_id;
+ Timestamp now = clock_->CurrentTime();
+ if (!scheduled_repeat_.has_value()) {
+ scheduled_repeat_.emplace(now, queued_frames_.front().timestamp_us(),
+ queued_frames_.front().ntp_time_ms());
+ }
+ scheduled_repeat_->scheduled = now;
+ scheduled_repeat_->idle = idle_repeat;
+
+ TimeDelta repeat_delay = RepeatDuration(idle_repeat);
+ queue_->PostDelayedHighPrecisionTask(
+ SafeTask(safety_.flag(),
+ [this, frame_id] {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ ProcessRepeatedFrameOnDelayedCadence(frame_id);
+ }),
+ repeat_delay);
+}
+
+void ZeroHertzAdapterMode::ProcessRepeatedFrameOnDelayedCadence(int frame_id) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_VERBOSE) << __func__ << " this " << this << " frame_id "
+ << frame_id;
+ RTC_DCHECK(!queued_frames_.empty());
+
+ // Cancel this invocation if new frames turned up.
+ if (frame_id != current_frame_id_)
+ return;
+ RTC_DCHECK(scheduled_repeat_.has_value());
+
+ VideoFrame& frame = queued_frames_.front();
+
+ // Since this is a repeated frame, nothing changed compared to before.
+ VideoFrame::UpdateRect empty_update_rect;
+ empty_update_rect.MakeEmptyUpdate();
+ frame.set_update_rect(empty_update_rect);
+
+  // Adjust the timestamps of the repeated frame, accounting for the actual
+  // delay since we started repeating.
+ //
+ // NOTE: No need to update the RTP timestamp as the VideoStreamEncoder
+ // overwrites it based on its chosen NTP timestamp source.
+ TimeDelta total_delay = clock_->CurrentTime() - scheduled_repeat_->origin;
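+  // E.g. if we have been repeating for 2.5 seconds, the repeated frame's
+  // timestamp_us becomes the origin's plus 2,500,000 and its ntp_time_ms the
+  // origin's plus 2,500.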
+ if (frame.timestamp_us() > 0) {
+ frame.set_timestamp_us(scheduled_repeat_->origin_timestamp_us +
+ total_delay.us());
+ }
+ if (frame.ntp_time_ms()) {
+ frame.set_ntp_time_ms(scheduled_repeat_->origin_ntp_time_ms +
+ total_delay.ms());
+ }
+ SendFrameNow(frame);
+
+ // Schedule another repeat.
+ ScheduleRepeat(frame_id, HasQualityConverged());
+}
+
+void ZeroHertzAdapterMode::SendFrameNow(const VideoFrame& frame) const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_VERBOSE) << __func__ << " this " << this << " timestamp "
+ << frame.timestamp() << " timestamp_us "
+ << frame.timestamp_us() << " ntp_time_ms "
+ << frame.ntp_time_ms();
+ // TODO(crbug.com/1255737): figure out if frames_scheduled_for_processing
+ // makes sense to compute in this implementation.
+ callback_->OnFrame(/*post_time=*/clock_->CurrentTime(),
+ /*frames_scheduled_for_processing=*/1, frame);
+}
+
+TimeDelta ZeroHertzAdapterMode::RepeatDuration(bool idle_repeat) const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return idle_repeat
+ ? FrameCadenceAdapterInterface::kZeroHertzIdleRepeatRatePeriod
+ : frame_delay_;
+}
+
+void ZeroHertzAdapterMode::MaybeStartRefreshFrameRequester() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DLOG(LS_VERBOSE) << __func__;
+ if (!refresh_frame_requester_.Running()) {
+ refresh_frame_requester_ = RepeatingTaskHandle::DelayedStart(
+ queue_,
+ FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod *
+ frame_delay_,
+ [this] {
+ RTC_DLOG(LS_VERBOSE) << __func__ << " RequestRefreshFrame";
+ if (callback_)
+ callback_->RequestRefreshFrame();
+ return frame_delay_;
+ });
+ }
+}
+
+FrameCadenceAdapterImpl::FrameCadenceAdapterImpl(
+ Clock* clock,
+ TaskQueueBase* queue,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ queue_(queue),
+ zero_hertz_screenshare_enabled_(
+ !field_trials.IsDisabled("WebRTC-ZeroHertzScreenshare")) {}
+
+FrameCadenceAdapterImpl::~FrameCadenceAdapterImpl() {
+ RTC_DLOG(LS_VERBOSE) << __func__ << " this " << this;
+}
+
+void FrameCadenceAdapterImpl::Initialize(Callback* callback) {
+ callback_ = callback;
+ passthrough_adapter_.emplace(clock_, callback);
+ current_adapter_mode_ = &passthrough_adapter_.value();
+}
+
+void FrameCadenceAdapterImpl::SetZeroHertzModeEnabled(
+ absl::optional<ZeroHertzModeParams> params) {
+ RTC_DCHECK_RUN_ON(queue_);
+ bool was_zero_hertz_enabled = zero_hertz_params_.has_value();
+ if (params.has_value() && !was_zero_hertz_enabled)
+ has_reported_screenshare_frame_rate_umas_ = false;
+ zero_hertz_params_ = params;
+ MaybeReconfigureAdapters(was_zero_hertz_enabled);
+}
+
+absl::optional<uint32_t> FrameCadenceAdapterImpl::GetInputFrameRateFps() {
+ RTC_DCHECK_RUN_ON(queue_);
+ return current_adapter_mode_->GetInputFrameRateFps();
+}
+
+void FrameCadenceAdapterImpl::UpdateFrameRate() {
+ RTC_DCHECK_RUN_ON(queue_);
+  // The frame rate need not be updated for the zero-hertz adapter. The
+  // passthrough adapter, however, uses it. Always pass frames into the
+  // passthrough adapter to keep the estimate alive in case of an adapter
+  // switch.
+ passthrough_adapter_->UpdateFrameRate();
+}
+
+void FrameCadenceAdapterImpl::UpdateLayerQualityConvergence(
+ size_t spatial_index,
+ bool quality_converged) {
+ if (zero_hertz_adapter_.has_value())
+ zero_hertz_adapter_->UpdateLayerQualityConvergence(spatial_index,
+ quality_converged);
+}
+
+void FrameCadenceAdapterImpl::UpdateLayerStatus(size_t spatial_index,
+ bool enabled) {
+ if (zero_hertz_adapter_.has_value())
+ zero_hertz_adapter_->UpdateLayerStatus(spatial_index, enabled);
+}
+
+void FrameCadenceAdapterImpl::ProcessKeyFrameRequest() {
+ RTC_DCHECK_RUN_ON(queue_);
+ if (zero_hertz_adapter_)
+ zero_hertz_adapter_->ProcessKeyFrameRequest();
+}
+
+void FrameCadenceAdapterImpl::OnFrame(const VideoFrame& frame) {
+  // This method is called on the network thread under Chromium, or in
+  // various other contexts in tests.
+ RTC_DCHECK_RUNS_SERIALIZED(&incoming_frame_race_checker_);
+ RTC_DLOG(LS_VERBOSE) << "FrameCadenceAdapterImpl::" << __func__ << " this "
+ << this;
+
+ // Local time in webrtc time base.
+ Timestamp post_time = clock_->CurrentTime();
+ frames_scheduled_for_processing_.fetch_add(1, std::memory_order_relaxed);
+ queue_->PostTask(SafeTask(safety_.flag(), [this, post_time, frame] {
+ RTC_DCHECK_RUN_ON(queue_);
+ if (zero_hertz_adapter_created_timestamp_.has_value()) {
+ TimeDelta time_until_first_frame =
+ clock_->CurrentTime() - *zero_hertz_adapter_created_timestamp_;
+ zero_hertz_adapter_created_timestamp_ = absl::nullopt;
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Screenshare.ZeroHz.TimeUntilFirstFrameMs",
+ time_until_first_frame.ms());
+ }
+
+ const int frames_scheduled_for_processing =
+ frames_scheduled_for_processing_.fetch_sub(1,
+ std::memory_order_relaxed);
+ OnFrameOnMainQueue(post_time, frames_scheduled_for_processing,
+ std::move(frame));
+ MaybeReportFrameRateConstraintUmas();
+ }));
+}
+
+void FrameCadenceAdapterImpl::OnDiscardedFrame() {
+ callback_->OnDiscardedFrame();
+ queue_->PostTask(SafeTask(safety_.flag(), [this] {
+ RTC_DCHECK_RUN_ON(queue_);
+ if (zero_hertz_adapter_) {
+ zero_hertz_adapter_->OnDiscardedFrame();
+ }
+ }));
+}
+
+void FrameCadenceAdapterImpl::OnConstraintsChanged(
+ const VideoTrackSourceConstraints& constraints) {
+ RTC_LOG(LS_INFO) << __func__ << " this " << this << " min_fps "
+ << constraints.min_fps.value_or(-1) << " max_fps "
+ << constraints.max_fps.value_or(-1);
+ queue_->PostTask(SafeTask(safety_.flag(), [this, constraints] {
+ RTC_DCHECK_RUN_ON(queue_);
+ bool was_zero_hertz_enabled = IsZeroHertzScreenshareEnabled();
+ source_constraints_ = constraints;
+ MaybeReconfigureAdapters(was_zero_hertz_enabled);
+ }));
+}
+
+void FrameCadenceAdapterImpl::OnFrameOnMainQueue(
+ Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) {
+ RTC_DCHECK_RUN_ON(queue_);
+ current_adapter_mode_->OnFrame(post_time, frames_scheduled_for_processing,
+ frame);
+}
+
+bool FrameCadenceAdapterImpl::IsZeroHertzScreenshareEnabled() const {
+ RTC_DCHECK_RUN_ON(queue_);
+ return zero_hertz_screenshare_enabled_ && source_constraints_.has_value() &&
+ source_constraints_->max_fps.value_or(-1) > 0 &&
+ source_constraints_->min_fps.value_or(-1) == 0 &&
+ zero_hertz_params_.has_value();
+}
+
+void FrameCadenceAdapterImpl::MaybeReconfigureAdapters(
+ bool was_zero_hertz_enabled) {
+ RTC_DCHECK_RUN_ON(queue_);
+ bool is_zero_hertz_enabled = IsZeroHertzScreenshareEnabled();
+ if (is_zero_hertz_enabled) {
+ if (!was_zero_hertz_enabled) {
+ zero_hertz_adapter_.emplace(queue_, clock_, callback_,
+ source_constraints_->max_fps.value());
+ RTC_LOG(LS_INFO) << "Zero hertz mode activated.";
+ zero_hertz_adapter_created_timestamp_ = clock_->CurrentTime();
+ }
+ zero_hertz_adapter_->ReconfigureParameters(zero_hertz_params_.value());
+ current_adapter_mode_ = &zero_hertz_adapter_.value();
+ } else {
+ if (was_zero_hertz_enabled)
+ zero_hertz_adapter_ = absl::nullopt;
+ current_adapter_mode_ = &passthrough_adapter_.value();
+ }
+}
+
+void FrameCadenceAdapterImpl::MaybeReportFrameRateConstraintUmas() {
+ RTC_DCHECK_RUN_ON(queue_);
+ if (has_reported_screenshare_frame_rate_umas_)
+ return;
+ has_reported_screenshare_frame_rate_umas_ = true;
+ if (!zero_hertz_params_.has_value())
+ return;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Screenshare.FrameRateConstraints.Exists",
+ source_constraints_.has_value());
+ if (!source_constraints_.has_value())
+ return;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Screenshare.FrameRateConstraints.Min.Exists",
+ source_constraints_->min_fps.has_value());
+ if (source_constraints_->min_fps.has_value()) {
+ RTC_HISTOGRAM_COUNTS_100(
+ "WebRTC.Screenshare.FrameRateConstraints.Min.Value",
+ source_constraints_->min_fps.value());
+ }
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Screenshare.FrameRateConstraints.Max.Exists",
+ source_constraints_->max_fps.has_value());
+ if (source_constraints_->max_fps.has_value()) {
+ RTC_HISTOGRAM_COUNTS_100(
+ "WebRTC.Screenshare.FrameRateConstraints.Max.Value",
+ source_constraints_->max_fps.value());
+ }
+ if (!source_constraints_->min_fps.has_value()) {
+ if (source_constraints_->max_fps.has_value()) {
+ RTC_HISTOGRAM_COUNTS_100(
+ "WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max",
+ source_constraints_->max_fps.value());
+ }
+ } else if (source_constraints_->max_fps.has_value()) {
+ if (source_constraints_->min_fps.value() <
+ source_constraints_->max_fps.value()) {
+ RTC_HISTOGRAM_COUNTS_100(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min",
+ source_constraints_->min_fps.value());
+ RTC_HISTOGRAM_COUNTS_100(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max",
+ source_constraints_->max_fps.value());
+ }
+    // Multi-dimensional histogram for min and max FPS, making it possible to
+    // uncover which min and max combinations occur. See
+ // https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md#multidimensional-histograms
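+    // E.g. min_fps 5 and max_fps 30 map to bucket 5 * 60 + 30 - 1 = 329.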
+ constexpr int kMaxBucketCount =
+ 60 * /*max min_fps=*/60 + /*max max_fps=*/60 - 1;
+ RTC_HISTOGRAM_ENUMERATION_SPARSE(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne",
+ source_constraints_->min_fps.value() * 60 +
+ source_constraints_->max_fps.value() - 1,
+ /*boundary=*/kMaxBucketCount);
+ }
+}
+
+} // namespace
+
+std::unique_ptr<FrameCadenceAdapterInterface>
+FrameCadenceAdapterInterface::Create(Clock* clock,
+ TaskQueueBase* queue,
+ const FieldTrialsView& field_trials) {
+ return std::make_unique<FrameCadenceAdapterImpl>(clock, queue, field_trials);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/frame_cadence_adapter.h b/third_party/libwebrtc/video/frame_cadence_adapter.h
new file mode 100644
index 0000000000..d0eab7e770
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_cadence_adapter.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_FRAME_CADENCE_ADAPTER_H_
+#define VIDEO_FRAME_CADENCE_ADAPTER_H_
+
+#include <memory>
+
+#include "absl/base/attributes.h"
+#include "api/field_trials_view.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+// A sink adapter implementing mutations to the received frame cadence.
+// With the exception of the constructor and the methods overridden in
+// VideoSinkInterface, the rest of the interface to this class (including the
+// dtor) needs to be invoked on the queue passed in Create.
+class FrameCadenceAdapterInterface
+ : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ // Averaging window spanning 90 frames at default 30fps, matching old media
+ // optimization module defaults.
+ // TODO(crbug.com/1255737): Use TimeDelta.
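+  // With integer division this evaluates to (1000 / 30) * 90 = 2970 ms.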
+ static constexpr int64_t kFrameRateAveragingWindowSizeMs = (1000 / 30) * 90;
+  // In zero-hertz mode, the idle repeat rate is a compromise between the RTP
+  // receiver's keyframe-requesting timeout (3 s), other backend limitations,
+  // and some worst-case RTT.
+ static constexpr TimeDelta kZeroHertzIdleRepeatRatePeriod =
+ TimeDelta::Millis(1000);
+ // The number of frame periods to wait for new frames until starting to
+ // request refresh frames.
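+  // E.g. at a 30 fps max rate, refresh frames start being requested after
+  // roughly 100 ms without incoming frames.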
+ static constexpr int kOnDiscardedFrameRefreshFramePeriod = 3;
+
+ struct ZeroHertzModeParams {
+ // The number of simulcast layers used in this configuration.
+ size_t num_simulcast_layers = 0;
+ };
+
+ // Callback interface used to inform instance owners.
+ class Callback {
+ public:
+ virtual ~Callback() = default;
+
+ // Called when a frame arrives on the |queue| specified in Create.
+ //
+ // The |post_time| parameter indicates the current time sampled when
+ // FrameCadenceAdapterInterface::OnFrame was called.
+ //
+    // |frames_scheduled_for_processing| indicates how many frames have been
+    // scheduled for processing. During sequential conditions where
+    // FrameCadenceAdapterInterface::OnFrame is invoked and subsequently ends
+    // up in this callback, this value will read 1. Otherwise, if the
+    // |queue| gets stalled for some reason, the value will increase
+    // beyond 1.
+ virtual void OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) = 0;
+
+ // Called when the source has discarded a frame.
+ virtual void OnDiscardedFrame() = 0;
+
+ // Called when the adapter needs the source to send a refresh frame.
+ virtual void RequestRefreshFrame() = 0;
+ };
+
+ // Factory function creating a production instance. Deletion of the returned
+ // instance needs to happen on the same sequence that Create() was called on.
+ // Frames arriving in FrameCadenceAdapterInterface::OnFrame are posted to
+ // Callback::OnFrame on the |queue|.
+ static std::unique_ptr<FrameCadenceAdapterInterface> Create(
+ Clock* clock,
+ TaskQueueBase* queue,
+ const FieldTrialsView& field_trials);
+
+ // Call before using the rest of the API.
+ virtual void Initialize(Callback* callback) = 0;
+
+  // Pass zero-hertz parameters in |params| as a prerequisite to enable
+  // zero-hertz operation. If absl::nullopt is passed, the cadence adapter
+  // will switch to passthrough mode.
+ virtual void SetZeroHertzModeEnabled(
+ absl::optional<ZeroHertzModeParams> params) = 0;
+
+  // Returns the input framerate. This is measured by RateStatistics when
+  // zero-hertz mode is off, and is the configured max framerate in zero-hertz
+  // mode.
+ virtual absl::optional<uint32_t> GetInputFrameRateFps() = 0;
+
+  // Updates the frame rate. This is done irrespective of the adapter mode.
+ virtual void UpdateFrameRate() = 0;
+
+ // Updates quality convergence status for an enabled spatial layer.
+ // Convergence means QP has dropped to a low-enough level to warrant ceasing
+ // to send identical frames at high frequency.
+ virtual void UpdateLayerQualityConvergence(size_t spatial_index,
+ bool converged) = 0;
+
+ // Updates spatial layer enabled status.
+ virtual void UpdateLayerStatus(size_t spatial_index, bool enabled) = 0;
+
+ // Conditionally requests a refresh frame via
+ // Callback::RequestRefreshFrame.
+ virtual void ProcessKeyFrameRequest() = 0;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_FRAME_CADENCE_ADAPTER_H_
diff --git a/third_party/libwebrtc/video/frame_cadence_adapter_gn/moz.build b/third_party/libwebrtc/video/frame_cadence_adapter_gn/moz.build
new file mode 100644
index 0000000000..2f7abf4617
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_cadence_adapter_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/frame_cadence_adapter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_cadence_adapter_gn")
diff --git a/third_party/libwebrtc/video/frame_cadence_adapter_unittest.cc b/third_party/libwebrtc/video/frame_cadence_adapter_unittest.cc
new file mode 100644
index 0000000000..afc675ffde
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_cadence_adapter_unittest.cc
@@ -0,0 +1,1101 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_cadence_adapter.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/nv12_buffer.h"
+#include "api/video/video_frame.h"
+#include "rtc_base/event.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/ntp_time.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Invoke;
+using ::testing::Mock;
+using ::testing::Pair;
+using ::testing::Values;
+
+VideoFrame CreateFrame() {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(
+ rtc::make_ref_counted<NV12Buffer>(/*width=*/16, /*height=*/16))
+ .build();
+}
+
+VideoFrame CreateFrameWithTimestamps(
+ GlobalSimulatedTimeController* time_controller) {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(
+ rtc::make_ref_counted<NV12Buffer>(/*width=*/16, /*height=*/16))
+ .set_ntp_time_ms(time_controller->GetClock()->CurrentNtpInMilliseconds())
+ .set_timestamp_us(time_controller->GetClock()->CurrentTime().us())
+ .build();
+}
+
+std::unique_ptr<FrameCadenceAdapterInterface> CreateAdapter(
+ const FieldTrialsView& field_trials,
+ Clock* clock) {
+ return FrameCadenceAdapterInterface::Create(clock, TaskQueueBase::Current(),
+ field_trials);
+}
+
+class MockCallback : public FrameCadenceAdapterInterface::Callback {
+ public:
+ MOCK_METHOD(void, OnFrame, (Timestamp, int, const VideoFrame&), (override));
+ MOCK_METHOD(void, OnDiscardedFrame, (), (override));
+ MOCK_METHOD(void, RequestRefreshFrame, (), (override));
+};
+
+class ZeroHertzFieldTrialDisabler : public test::ScopedKeyValueConfig {
+ public:
+ ZeroHertzFieldTrialDisabler()
+ : test::ScopedKeyValueConfig("WebRTC-ZeroHertzScreenshare/Disabled/") {}
+};
+
+class ZeroHertzFieldTrialEnabler : public test::ScopedKeyValueConfig {
+ public:
+ ZeroHertzFieldTrialEnabler()
+ : test::ScopedKeyValueConfig("WebRTC-ZeroHertzScreenshare/Enabled/") {}
+};
+
+TEST(FrameCadenceAdapterTest,
+ ForwardsFramesOnConstructionAndUnderDisabledFieldTrial) {
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1));
+ ZeroHertzFieldTrialDisabler disabled_field_trials;
+ test::ScopedKeyValueConfig no_field_trials;
+ for (int i = 0; i != 2; i++) {
+ MockCallback callback;
+ auto adapter =
+ CreateAdapter(i == 0 ? disabled_field_trials : no_field_trials,
+ time_controller.GetClock());
+ adapter->Initialize(&callback);
+ VideoFrame frame = CreateFrame();
+ EXPECT_CALL(callback, OnFrame).Times(1);
+ adapter->OnFrame(frame);
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ Mock::VerifyAndClearExpectations(&callback);
+ EXPECT_CALL(callback, OnDiscardedFrame).Times(1);
+ adapter->OnDiscardedFrame();
+ Mock::VerifyAndClearExpectations(&callback);
+ }
+}
+
+TEST(FrameCadenceAdapterTest, CountsOutstandingFramesToProcess) {
+ test::ScopedKeyValueConfig no_field_trials;
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(1));
+ MockCallback callback;
+ auto adapter = CreateAdapter(no_field_trials, time_controller.GetClock());
+ adapter->Initialize(&callback);
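+  // Both frames below are posted before the task queue gets to run, so the
+  // first callback observes 2 frames scheduled for processing and the second
+  // observes 1.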
+ EXPECT_CALL(callback, OnFrame(_, 2, _)).Times(1);
+ EXPECT_CALL(callback, OnFrame(_, 1, _)).Times(1);
+ auto frame = CreateFrame();
+ adapter->OnFrame(frame);
+ adapter->OnFrame(frame);
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ EXPECT_CALL(callback, OnFrame(_, 1, _)).Times(1);
+ adapter->OnFrame(frame);
+ time_controller.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST(FrameCadenceAdapterTest, FrameRateFollowsRateStatisticsByDefault) {
+ test::ScopedKeyValueConfig no_field_trials;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(no_field_trials, time_controller.GetClock());
+ adapter->Initialize(nullptr);
+
+ // Create an "oracle" rate statistics which should be followed on a sequence
+ // of frames.
+ RateStatistics rate(
+ FrameCadenceAdapterInterface::kFrameRateAveragingWindowSizeMs, 1000);
+
+ for (int frame = 0; frame != 10; ++frame) {
+ time_controller.AdvanceTime(TimeDelta::Millis(10));
+ rate.Update(1, time_controller.GetClock()->TimeInMilliseconds());
+ adapter->UpdateFrameRate();
+ EXPECT_EQ(rate.Rate(time_controller.GetClock()->TimeInMilliseconds()),
+ adapter->GetInputFrameRateFps())
+ << " failed for frame " << frame;
+ }
+}
+
+TEST(FrameCadenceAdapterTest,
+ FrameRateFollowsRateStatisticsWhenFeatureDisabled) {
+ ZeroHertzFieldTrialDisabler feature_disabler;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(feature_disabler, time_controller.GetClock());
+ adapter->Initialize(nullptr);
+
+ // Create an "oracle" rate statistics which should be followed on a sequence
+ // of frames.
+ RateStatistics rate(
+ FrameCadenceAdapterInterface::kFrameRateAveragingWindowSizeMs, 1000);
+
+ for (int frame = 0; frame != 10; ++frame) {
+ time_controller.AdvanceTime(TimeDelta::Millis(10));
+ rate.Update(1, time_controller.GetClock()->TimeInMilliseconds());
+ adapter->UpdateFrameRate();
+ EXPECT_EQ(rate.Rate(time_controller.GetClock()->TimeInMilliseconds()),
+ adapter->GetInputFrameRateFps())
+ << " failed for frame " << frame;
+ }
+}
+
+TEST(FrameCadenceAdapterTest, FrameRateFollowsMaxFpsWhenZeroHertzActivated) {
+ ZeroHertzFieldTrialEnabler enabler;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(nullptr);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 1});
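+  // With zero-hertz mode enabled above, constraints of {min_fps = 0,
+  // max_fps = 1} make the adapter report the configured max_fps as the input
+  // frame rate.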
+ for (int frame = 0; frame != 10; ++frame) {
+ time_controller.AdvanceTime(TimeDelta::Millis(10));
+ adapter->UpdateFrameRate();
+ EXPECT_EQ(adapter->GetInputFrameRateFps(), 1u);
+ }
+}
+
+TEST(FrameCadenceAdapterTest,
+ FrameRateFollowsRateStatisticsAfterZeroHertzDeactivated) {
+ ZeroHertzFieldTrialEnabler enabler;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(nullptr);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 1});
+ RateStatistics rate(
+ FrameCadenceAdapterInterface::kFrameRateAveragingWindowSizeMs, 1000);
+  constexpr int kMaxFrames = 10;
+  for (int frame = 0; frame != kMaxFrames; ++frame) {
+ time_controller.AdvanceTime(TimeDelta::Millis(10));
+ rate.Update(1, time_controller.GetClock()->TimeInMilliseconds());
+ adapter->UpdateFrameRate();
+ }
+  // Turn off zero-hertz mode on the second-to-last frame; after the last
+  // frame we should see a value that tracks the rate oracle.
+ adapter->SetZeroHertzModeEnabled(absl::nullopt);
+ // Last frame.
+ time_controller.AdvanceTime(TimeDelta::Millis(10));
+ rate.Update(1, time_controller.GetClock()->TimeInMilliseconds());
+ adapter->UpdateFrameRate();
+
+ EXPECT_EQ(rate.Rate(time_controller.GetClock()->TimeInMilliseconds()),
+ adapter->GetInputFrameRateFps());
+}
+
+TEST(FrameCadenceAdapterTest, ForwardsFramesDelayed) {
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 1});
+ constexpr int kNumFrames = 3;
+ NtpTime original_ntp_time = time_controller.GetClock()->CurrentNtpTime();
+ auto frame = CreateFrameWithTimestamps(&time_controller);
+ int64_t original_timestamp_us = frame.timestamp_us();
+ for (int index = 0; index != kNumFrames; ++index) {
+ EXPECT_CALL(callback, OnFrame).Times(0);
+ adapter->OnFrame(frame);
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp post_time, int,
+ const VideoFrame& frame) {
+ EXPECT_EQ(post_time, time_controller.GetClock()->CurrentTime());
+ EXPECT_EQ(frame.timestamp_us(),
+ original_timestamp_us + index * rtc::kNumMicrosecsPerSec);
+ EXPECT_EQ(frame.ntp_time_ms(), original_ntp_time.ToMs() +
+ index * rtc::kNumMillisecsPerSec);
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ frame = CreateFrameWithTimestamps(&time_controller);
+ }
+}
+
+TEST(FrameCadenceAdapterTest, RepeatsFramesDelayed) {
+  // Logic in the frame cadence adapter avoids modifying frame NTP and render
+  // timestamps if these timestamps look unset, which is the case when the
+  // clock starts running from 0. For this reason we initialize
+  // `time_controller` with an arbitrary nonzero constant.
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(47892223));
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 1});
+ NtpTime original_ntp_time = time_controller.GetClock()->CurrentNtpTime();
+
+ // Send one frame, expect 2 subsequent repeats.
+ auto frame = CreateFrameWithTimestamps(&time_controller);
+ int64_t original_timestamp_us = frame.timestamp_us();
+ adapter->OnFrame(frame);
+
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp post_time, int, const VideoFrame& frame) {
+ EXPECT_EQ(post_time, time_controller.GetClock()->CurrentTime());
+ EXPECT_EQ(frame.timestamp_us(), original_timestamp_us);
+ EXPECT_EQ(frame.ntp_time_ms(), original_ntp_time.ToMs());
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp post_time, int, const VideoFrame& frame) {
+ EXPECT_EQ(post_time, time_controller.GetClock()->CurrentTime());
+ EXPECT_EQ(frame.timestamp_us(),
+ original_timestamp_us + rtc::kNumMicrosecsPerSec);
+ EXPECT_EQ(frame.ntp_time_ms(),
+ original_ntp_time.ToMs() + rtc::kNumMillisecsPerSec);
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp post_time, int, const VideoFrame& frame) {
+ EXPECT_EQ(post_time, time_controller.GetClock()->CurrentTime());
+ EXPECT_EQ(frame.timestamp_us(),
+ original_timestamp_us + 2 * rtc::kNumMicrosecsPerSec);
+ EXPECT_EQ(frame.ntp_time_ms(),
+ original_ntp_time.ToMs() + 2 * rtc::kNumMillisecsPerSec);
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+}
+
+TEST(FrameCadenceAdapterTest,
+ RepeatsFramesWithoutTimestampsWithUnsetTimestamps) {
+  // Logic in the frame cadence adapter avoids modifying frame NTP and render
+  // timestamps if these timestamps look unset, which is the case when the
+  // clock starts running from 0. In this test we deliberately start the clock
+  // at a nonzero time, but pass frames with unset timestamps (via
+  // CreateFrame()), and verify that the timestamp-modifying logic doesn't
+  // depend on the current time.
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Millis(4711));
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 1});
+
+ // Send one frame, expect a repeat.
+ adapter->OnFrame(CreateFrame());
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp post_time, int, const VideoFrame& frame) {
+ EXPECT_EQ(post_time, time_controller.GetClock()->CurrentTime());
+ EXPECT_EQ(frame.timestamp_us(), 0);
+ EXPECT_EQ(frame.ntp_time_ms(), 0);
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp post_time, int, const VideoFrame& frame) {
+ EXPECT_EQ(post_time, time_controller.GetClock()->CurrentTime());
+ EXPECT_EQ(frame.timestamp_us(), 0);
+ EXPECT_EQ(frame.ntp_time_ms(), 0);
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+}
+
+TEST(FrameCadenceAdapterTest, StopsRepeatingFramesDelayed) {
+ // At 1s, the initially scheduled frame appears.
+ // At 2s, the repeated initial frame appears.
+ // At 2.5s, we schedule another new frame.
+ // At 3.5s, we receive this frame.
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 1});
+ NtpTime original_ntp_time = time_controller.GetClock()->CurrentNtpTime();
+
+ // Send one frame, expect 1 subsequent repeat.
+ adapter->OnFrame(CreateFrameWithTimestamps(&time_controller));
+ EXPECT_CALL(callback, OnFrame).Times(2);
+ time_controller.AdvanceTime(TimeDelta::Seconds(2.5));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ // Send the new frame at 2.5s, which should appear after 3.5s.
+ adapter->OnFrame(CreateFrameWithTimestamps(&time_controller));
+ EXPECT_CALL(callback, OnFrame)
+ .WillOnce(Invoke([&](Timestamp, int, const VideoFrame& frame) {
+ EXPECT_EQ(frame.timestamp_us(), 5 * rtc::kNumMicrosecsPerSec / 2);
+ EXPECT_EQ(frame.ntp_time_ms(),
+ original_ntp_time.ToMs() + 5u * rtc::kNumMillisecsPerSec / 2);
+ }));
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+}
+
+TEST(FrameCadenceAdapterTest, RequestsRefreshFrameOnKeyFrameRequestWhenNew) {
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ constexpr int kMaxFps = 10;
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFps});
+ EXPECT_CALL(callback, RequestRefreshFrame);
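+  // Advance by kOnDiscardedFrameRefreshFramePeriod frame periods, i.e.
+  // (period constant / kMaxFps) seconds; 300 ms if the constant is 3, which
+  // is assumed here only for illustration. No frame has arrived yet, so the
+  // key frame request below results in a refresh frame request.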
+ time_controller.AdvanceTime(
+ TimeDelta::Seconds(1) *
+ FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod /
+ kMaxFps);
+ adapter->ProcessKeyFrameRequest();
+}
+
+TEST(FrameCadenceAdapterTest, IgnoresKeyFrameRequestShortlyAfterFrame) {
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 10});
+ adapter->OnFrame(CreateFrame());
+ time_controller.AdvanceTime(TimeDelta::Zero());
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+ adapter->ProcessKeyFrameRequest();
+}
+
+TEST(FrameCadenceAdapterTest, RequestsRefreshFramesUntilArrival) {
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ constexpr int kMaxFps = 10;
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFps});
+
+  // During the one second we wait before sending a single frame, we should
+  // see (max_fps + 1 -
+  // FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod)
+  // refresh frame requests. After the frame is sent, refresh frame requests
+  // should cease: we should see none during the following second.
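+  // (For illustration: if the period constant is 3, that is 10 + 1 - 3 = 8
+  // requests over the second.)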
+ EXPECT_CALL(callback, RequestRefreshFrame)
+ .Times(kMaxFps + 1 -
+ FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod);
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+ adapter->OnFrame(CreateFrame());
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+}
+
+TEST(FrameCadenceAdapterTest, RequestsRefreshAfterFrameDrop) {
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ constexpr int kMaxFps = 10;
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFps});
+
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+
+ // Send a frame through to cancel the initial delayed timer waiting for first
+ // frame entry.
+ adapter->OnFrame(CreateFrame());
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ // Send a dropped frame indication without any following frames received.
+ // After FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod
+ // frame periods, we should receive a first refresh request.
+ adapter->OnDiscardedFrame();
+ EXPECT_CALL(callback, RequestRefreshFrame);
+ time_controller.AdvanceTime(
+ TimeDelta::Seconds(1) *
+ FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod /
+ kMaxFps);
+ Mock::VerifyAndClearExpectations(&callback);
+
+ // We will now receive a refresh frame request for every frame period.
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(kMaxFps);
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ // After a frame is passed the requests will cease.
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+ adapter->OnFrame(CreateFrame());
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+}
+
+TEST(FrameCadenceAdapterTest, OmitsRefreshAfterFrameDropWithTimelyFrameEntry) {
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ constexpr int kMaxFps = 10;
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFps});
+
+ // Send a frame through to cancel the initial delayed timer waiting for first
+ // frame entry.
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+ adapter->OnFrame(CreateFrame());
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ // Send a frame drop indication. No refresh frames should be requested
+ // until FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod
+ // intervals pass. Stop short of this.
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+ adapter->OnDiscardedFrame();
+ time_controller.AdvanceTime(
+ TimeDelta::Seconds(1) *
+ FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod /
+ kMaxFps -
+ TimeDelta::Micros(1));
+ Mock::VerifyAndClearExpectations(&callback);
+
+ // Send a frame. The timer to request the refresh frame should be cancelled by
+ // the reception, so no refreshes should be requested.
+ EXPECT_CALL(callback, RequestRefreshFrame).Times(0);
+ adapter->OnFrame(CreateFrame());
+ time_controller.AdvanceTime(TimeDelta::Seconds(1));
+ Mock::VerifyAndClearExpectations(&callback);
+}
+
+TEST(FrameCadenceAdapterTest, AcceptsUnconfiguredLayerFeedback) {
+ // This is a regression test for bugs.webrtc.org/14417.
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto adapter = CreateAdapter(enabler, time_controller.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{.num_simulcast_layers =
+ 1});
+ constexpr int kMaxFps = 10;
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFps});
+ time_controller.AdvanceTime(TimeDelta::Zero());
+
+ adapter->UpdateLayerQualityConvergence(2, false);
+ adapter->UpdateLayerStatus(2, false);
+}
+
+class FrameCadenceAdapterSimulcastLayersParamTest
+ : public ::testing::TestWithParam<int> {
+ public:
+ static constexpr int kMaxFpsHz = 8;
+ static constexpr TimeDelta kMinFrameDelay =
+ TimeDelta::Millis(1000 / kMaxFpsHz);
+ static constexpr TimeDelta kIdleFrameDelay =
+ FrameCadenceAdapterInterface::kZeroHertzIdleRepeatRatePeriod;
+
+ FrameCadenceAdapterSimulcastLayersParamTest() {
+ adapter_->Initialize(&callback_);
+ adapter_->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFpsHz});
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ adapter_->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ const size_t num_spatial_layers = GetParam();
+ adapter_->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{num_spatial_layers});
+ }
+
+ int NumSpatialLayers() const { return GetParam(); }
+
+ protected:
+ ZeroHertzFieldTrialEnabler enabler_;
+ MockCallback callback_;
+ GlobalSimulatedTimeController time_controller_{Timestamp::Zero()};
+ const std::unique_ptr<FrameCadenceAdapterInterface> adapter_{
+ CreateAdapter(enabler_, time_controller_.GetClock())};
+};
+
+TEST_P(FrameCadenceAdapterSimulcastLayersParamTest,
+ LayerReconfigurationResetsConvergenceInfo) {
+ // Assumes layer reconfiguration has just happened.
+ // Verify the state is unconverged.
+ adapter_->OnFrame(CreateFrame());
+ EXPECT_CALL(callback_, OnFrame).Times(kMaxFpsHz);
+ time_controller_.AdvanceTime(kMaxFpsHz * kMinFrameDelay);
+}
+
+TEST_P(FrameCadenceAdapterSimulcastLayersParamTest,
+ IgnoresKeyFrameRequestWhileShortRepeating) {
+ // Plot:
+ // 1. 0 * kMinFrameDelay: Start unconverged. Frame -> adapter.
+ // 2. 1 * kMinFrameDelay: Frame -> callback.
+ // 3. 2 * kMinFrameDelay: 1st short repeat.
+ // Since we're unconverged we assume the process continues.
+ adapter_->OnFrame(CreateFrame());
+ time_controller_.AdvanceTime(2 * kMinFrameDelay);
+ EXPECT_CALL(callback_, RequestRefreshFrame).Times(0);
+ adapter_->ProcessKeyFrameRequest();
+
+  // Expect short repeating as usual.
+  EXPECT_CALL(callback_, OnFrame).Times(kMaxFpsHz);
+  time_controller_.AdvanceTime(kMaxFpsHz * kMinFrameDelay);
+}
+
+TEST_P(FrameCadenceAdapterSimulcastLayersParamTest,
+ IgnoresKeyFrameRequestJustBeforeIdleRepeating) {
+  // (Only run for > 0 spatial layers, since with 0 layers we assume an
+  // unconverged state.)
+ if (NumSpatialLayers() == 0)
+ return;
+
+ // Plot:
+ // 1. 0 * kMinFrameDelay: Start converged. Frame -> adapter.
+ // 2. 1 * kMinFrameDelay: Frame -> callback. New repeat scheduled at
+ // (kMaxFpsHz + 1) * kMinFrameDelay.
+ // 3. kMaxFpsHz * kMinFrameDelay: Process keyframe.
+ // 4. (kMaxFpsHz + N) * kMinFrameDelay (1 <= N <= kMaxFpsHz): Short repeats
+ // due to not converged.
+ for (int i = 0; i != NumSpatialLayers(); i++) {
+ adapter_->UpdateLayerStatus(i, /*enabled=*/true);
+ adapter_->UpdateLayerQualityConvergence(i, /*converged=*/true);
+ }
+ adapter_->OnFrame(CreateFrame());
+ time_controller_.AdvanceTime(kIdleFrameDelay);
+
+  // We process the key frame request kMinFrameDelay before the first idle
+  // repeat is due. The resulting repeats should be spaced kMinFrameDelay
+  // apart until we get new convergence info.
+ EXPECT_CALL(callback_, RequestRefreshFrame).Times(0);
+ adapter_->ProcessKeyFrameRequest();
+ EXPECT_CALL(callback_, OnFrame).Times(kMaxFpsHz);
+ time_controller_.AdvanceTime(kMaxFpsHz * kMinFrameDelay);
+}
+
+TEST_P(FrameCadenceAdapterSimulcastLayersParamTest,
+ IgnoresKeyFrameRequestShortRepeatsBeforeIdleRepeat) {
+  // (Only run for > 0 spatial layers, since with 0 layers we assume an
+  // unconverged state.)
+ if (NumSpatialLayers() == 0)
+ return;
+ // Plot:
+ // 1. 0 * kMinFrameDelay: Start converged. Frame -> adapter.
+ // 2. 1 * kMinFrameDelay: Frame -> callback. New repeat scheduled at
+ // (kMaxFpsHz + 1) * kMinFrameDelay.
+ // 3. 2 * kMinFrameDelay: Process keyframe.
+ // 4. (2 + N) * kMinFrameDelay (1 <= N <= kMaxFpsHz): Short repeats due to not
+ // converged.
+ for (int i = 0; i != NumSpatialLayers(); i++) {
+ adapter_->UpdateLayerStatus(i, /*enabled=*/true);
+ adapter_->UpdateLayerQualityConvergence(i, /*converged=*/true);
+ }
+ adapter_->OnFrame(CreateFrame());
+ time_controller_.AdvanceTime(2 * kMinFrameDelay);
+
+  // We process the key frame request (kMaxFpsHz - 1) * kMinFrameDelay before
+  // the first idle repeat is due. The resulting repeats should be spaced
+  // kMinFrameDelay apart until we get new convergence info.
+ EXPECT_CALL(callback_, RequestRefreshFrame).Times(0);
+ adapter_->ProcessKeyFrameRequest();
+ EXPECT_CALL(callback_, OnFrame).Times(kMaxFpsHz);
+ time_controller_.AdvanceTime(kMaxFpsHz * kMinFrameDelay);
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+ FrameCadenceAdapterSimulcastLayersParamTest,
+ Values(0, 1, 2));
+
+class ZeroHertzLayerQualityConvergenceTest : public ::testing::Test {
+ public:
+ static constexpr TimeDelta kMinFrameDelay = TimeDelta::Millis(100);
+ static constexpr TimeDelta kIdleFrameDelay =
+ FrameCadenceAdapterInterface::kZeroHertzIdleRepeatRatePeriod;
+
+ ZeroHertzLayerQualityConvergenceTest() {
+ adapter_->Initialize(&callback_);
+ adapter_->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{
+ /*num_simulcast_layers=*/2});
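+    // With kMinFrameDelay = 100 ms, the constraints below configure
+    // max_fps = 10.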
+ adapter_->OnConstraintsChanged(VideoTrackSourceConstraints{
+ /*min_fps=*/0, /*max_fps=*/TimeDelta::Seconds(1) / kMinFrameDelay});
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ void PassFrame() { adapter_->OnFrame(CreateFrame()); }
+
+ void ExpectFrameEntriesAtDelaysFromNow(
+ std::initializer_list<TimeDelta> list) {
+ Timestamp origin = time_controller_.GetClock()->CurrentTime();
+ for (auto delay : list) {
+ EXPECT_CALL(callback_, OnFrame(origin + delay, _, _));
+ time_controller_.AdvanceTime(origin + delay -
+ time_controller_.GetClock()->CurrentTime());
+ }
+ }
+
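+  // Tasks scheduled here run on the current (simulated) task queue as
+  // ExpectFrameEntriesAtDelaysFromNow() advances the time controller.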
+ void ScheduleDelayed(TimeDelta delay, absl::AnyInvocable<void() &&> task) {
+ TaskQueueBase::Current()->PostDelayedTask(std::move(task), delay);
+ }
+
+ protected:
+ ZeroHertzFieldTrialEnabler field_trial_enabler_;
+ MockCallback callback_;
+ GlobalSimulatedTimeController time_controller_{Timestamp::Zero()};
+ std::unique_ptr<FrameCadenceAdapterInterface> adapter_{
+ CreateAdapter(field_trial_enabler_, time_controller_.GetClock())};
+};
+
+TEST_F(ZeroHertzLayerQualityConvergenceTest, InitialStateUnconverged) {
+ // As the layer count is just configured, assume we start out as unconverged.
+ PassFrame();
+ ExpectFrameEntriesAtDelaysFromNow({
+ 1 * kMinFrameDelay, // Original frame emitted
+ 2 * kMinFrameDelay, // Short repeats.
+ 3 * kMinFrameDelay, // ...
+ });
+}
+
+TEST_F(ZeroHertzLayerQualityConvergenceTest, UnconvergedAfterLayersEnabled) {
+ // With newly enabled layers we assume quality is unconverged.
+ adapter_->UpdateLayerStatus(0, /*enabled=*/true);
+ adapter_->UpdateLayerStatus(1, /*enabled=*/true);
+ PassFrame();
+ ExpectFrameEntriesAtDelaysFromNow({
+ kMinFrameDelay, // Original frame emitted
+ 2 * kMinFrameDelay, // Unconverged repeats.
+ 3 * kMinFrameDelay, // ...
+ });
+}
+
+TEST_F(ZeroHertzLayerQualityConvergenceTest,
+ RepeatsPassedFramesUntilConvergence) {
+ ScheduleDelayed(TimeDelta::Zero(), [&] {
+ adapter_->UpdateLayerStatus(0, /*enabled=*/true);
+ adapter_->UpdateLayerStatus(1, /*enabled=*/true);
+ PassFrame();
+ });
+ ScheduleDelayed(2.5 * kMinFrameDelay, [&] {
+ adapter_->UpdateLayerQualityConvergence(/*spatial_index=*/1, true);
+ });
+ ScheduleDelayed(3.5 * kMinFrameDelay, [&] {
+ adapter_->UpdateLayerQualityConvergence(/*spatial_index=*/0, true);
+ });
+ ScheduleDelayed(8 * kMinFrameDelay, [&] { PassFrame(); });
+ ScheduleDelayed(9.5 * kMinFrameDelay, [&] {
+ adapter_->UpdateLayerQualityConvergence(/*spatial_index=*/0, true);
+ });
+ ScheduleDelayed(10.5 * kMinFrameDelay, [&] {
+ adapter_->UpdateLayerQualityConvergence(/*spatial_index=*/1, true);
+ });
+ ExpectFrameEntriesAtDelaysFromNow({
+ kMinFrameDelay, // Original frame emitted
+ 2 * kMinFrameDelay, // Repeat from kMinFrameDelay.
+
+ // 2.5 * kMinFrameDelay: Converged in layer 1, layer 0 still unconverged.
+ 3 * kMinFrameDelay, // Repeat from 2 * kMinFrameDelay.
+
+ // 3.5 * kMinFrameDelay: Converged in layer 0 as well.
+ 4 * kMinFrameDelay, // Repeat from 3 * kMinFrameDelay. An idle repeat is
+ // scheduled for kIdleFrameDelay + 3 *
+ // kMinFrameDelay.
+
+ // A new frame is passed at 8 * kMinFrameDelay.
+ 9 * kMinFrameDelay, // Original frame emitted
+
+ // 9.5 * kMinFrameDelay: Converged in layer 0, layer 1 still unconverged.
+ 10 * kMinFrameDelay, // Repeat from 9 * kMinFrameDelay.
+      // 10.5 * kMinFrameDelay: Converged in layer 1 as well.
+      11 * kMinFrameDelay,  // Repeat from 10 * kMinFrameDelay; idle repeats
+                            // are scheduled next.
+ 11 * kMinFrameDelay + kIdleFrameDelay, // ...
+ 11 * kMinFrameDelay + 2 * kIdleFrameDelay, // ...
+ // ...
+ });
+}
+
+class FrameCadenceAdapterMetricsTest : public ::testing::Test {
+ public:
+ FrameCadenceAdapterMetricsTest() : time_controller_(Timestamp::Millis(1)) {
+ metrics::Reset();
+ }
+ void DepleteTaskQueues() { time_controller_.AdvanceTime(TimeDelta::Zero()); }
+
+ protected:
+ GlobalSimulatedTimeController time_controller_;
+};
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsNoUmasWithNoFrameTransfer) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, nullptr);
+ adapter->Initialize(&callback);
+ adapter->OnConstraintsChanged(
+ VideoTrackSourceConstraints{absl::nullopt, absl::nullopt});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{absl::nullopt, 1});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{2, 3});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{4, 4});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{5, absl::nullopt});
+ DepleteTaskQueues();
+ EXPECT_TRUE(metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Exists")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Exists")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Value")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Exists")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Value")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne")
+ .empty());
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsNoUmasWithoutEnabledContentType) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->OnFrame(CreateFrame());
+ adapter->OnConstraintsChanged(
+ VideoTrackSourceConstraints{absl::nullopt, absl::nullopt});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{absl::nullopt, 1});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{2, 3});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{4, 4});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{5, absl::nullopt});
+ DepleteTaskQueues();
+ EXPECT_TRUE(metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Exists")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Exists")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Value")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Exists")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Value")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne")
+ .empty());
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsNoConstraintsIfUnsetOnFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Exists"),
+ ElementsAre(Pair(false, 1)));
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsEmptyConstraintsIfSetOnFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(
+ VideoTrackSourceConstraints{absl::nullopt, absl::nullopt});
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Exists"),
+ ElementsAre(Pair(true, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Exists"),
+ ElementsAre(Pair(false, 1)));
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Value")
+ .empty());
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Exists"),
+ ElementsAre(Pair(false, 1)));
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Value")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne")
+ .empty());
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsMaxConstraintIfSetOnFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(
+ VideoTrackSourceConstraints{absl::nullopt, 2.0});
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Exists"),
+ ElementsAre(Pair(false, 1)));
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Value")
+ .empty());
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Exists"),
+ ElementsAre(Pair(true, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Value"),
+ ElementsAre(Pair(2.0, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max"),
+ ElementsAre(Pair(2.0, 1)));
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne")
+ .empty());
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsMinConstraintIfSetOnFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(
+ VideoTrackSourceConstraints{3.0, absl::nullopt});
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Exists"),
+ ElementsAre(Pair(true, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Value"),
+ ElementsAre(Pair(3.0, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Exists"),
+ ElementsAre(Pair(false, 1)));
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Value")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max")
+ .empty());
+ EXPECT_TRUE(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne")
+ .empty());
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsMinGtMaxConstraintIfSetOnFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{5.0, 4.0});
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Exists"),
+ ElementsAre(Pair(true, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Min.Value"),
+ ElementsAre(Pair(5.0, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Exists"),
+ ElementsAre(Pair(true, 1)));
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.Max.Value"),
+ ElementsAre(Pair(4.0, 1)));
+ EXPECT_TRUE(
+ metrics::Samples("WebRTC.Screenshare.FrameRateConstraints.MinUnset.Max")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min")
+ .empty());
+ EXPECT_TRUE(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max")
+ .empty());
+ EXPECT_THAT(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne"),
+ ElementsAre(Pair(60 * 5.0 + 4.0 - 1, 1)));
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsMinLtMaxConstraintIfSetOnFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{4.0, 5.0});
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Min"),
+ ElementsAre(Pair(4.0, 1)));
+ EXPECT_THAT(metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.MinLessThanMax.Max"),
+ ElementsAre(Pair(5.0, 1)));
+ EXPECT_THAT(
+ metrics::Samples(
+ "WebRTC.Screenshare.FrameRateConstraints.60MinPlusMaxMinusOne"),
+ ElementsAre(Pair(60 * 4.0 + 5.0 - 1, 1)));
+}
+
+TEST_F(FrameCadenceAdapterMetricsTest, RecordsTimeUntilFirstFrame) {
+ MockCallback callback;
+ test::ScopedKeyValueConfig no_field_trials;
+ auto adapter = CreateAdapter(no_field_trials, time_controller_.GetClock());
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 5.0});
+ time_controller_.AdvanceTime(TimeDelta::Millis(666));
+ adapter->OnFrame(CreateFrame());
+ DepleteTaskQueues();
+ EXPECT_THAT(
+ metrics::Samples("WebRTC.Screenshare.ZeroHz.TimeUntilFirstFrameMs"),
+ ElementsAre(Pair(666, 1)));
+}
+
+TEST(FrameCadenceAdapterRealTimeTest, TimestampsDoNotDrift) {
+ // This regression test must be performed in realtime because of limitations
+ // in GlobalSimulatedTimeController.
+ //
+  // We sleep for a long while in OnFrame after a repeat has been scheduled,
+  // which should be reflected in correspondingly increased ntp_time_ms() and
+  // timestamp_us() in the repeated frames.
+ auto factory = CreateDefaultTaskQueueFactory();
+ auto queue =
+ factory->CreateTaskQueue("test", TaskQueueFactory::Priority::NORMAL);
+ ZeroHertzFieldTrialEnabler enabler;
+ MockCallback callback;
+ Clock* clock = Clock::GetRealTimeClock();
+ std::unique_ptr<FrameCadenceAdapterInterface> adapter;
+ int frame_counter = 0;
+ int64_t original_ntp_time_ms;
+ int64_t original_timestamp_us;
+ rtc::Event event;
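+  // The adapter is bound to the task queue that is current at creation, so
+  // creation, use and destruction all happen in tasks posted to `queue`.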
+ queue->PostTask([&] {
+ adapter = CreateAdapter(enabler, clock);
+ adapter->Initialize(&callback);
+ adapter->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ adapter->OnConstraintsChanged(VideoTrackSourceConstraints{0, 30});
+ auto frame = CreateFrame();
+ original_ntp_time_ms = clock->CurrentNtpInMilliseconds();
+ frame.set_ntp_time_ms(original_ntp_time_ms);
+ original_timestamp_us = clock->CurrentTime().us();
+ frame.set_timestamp_us(original_timestamp_us);
+ constexpr int kSleepMs = rtc::kNumMillisecsPerSec / 2;
+ EXPECT_CALL(callback, OnFrame)
+ .WillRepeatedly(
+ Invoke([&](Timestamp, int, const VideoFrame& incoming_frame) {
+ ++frame_counter;
+ // Avoid the first OnFrame and sleep on the second.
+ if (frame_counter == 2) {
+ SleepMs(kSleepMs);
+ } else if (frame_counter == 3) {
+ EXPECT_GE(incoming_frame.ntp_time_ms(),
+ original_ntp_time_ms + kSleepMs);
+                  EXPECT_GE(incoming_frame.timestamp_us(),
+                            original_timestamp_us +
+                                kSleepMs * rtc::kNumMicrosecsPerMillisec);
+ event.Set();
+ }
+ }));
+ adapter->OnFrame(frame);
+ });
+ event.Wait(rtc::Event::kForever);
+ rtc::Event finalized;
+ queue->PostTask([&] {
+ adapter = nullptr;
+ finalized.Set();
+ });
+ finalized.Wait(rtc::Event::kForever);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/frame_decode_scheduler.h b/third_party/libwebrtc/video/frame_decode_scheduler.h
new file mode 100644
index 0000000000..29e27c22c8
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_decode_scheduler.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_FRAME_DECODE_SCHEDULER_H_
+#define VIDEO_FRAME_DECODE_SCHEDULER_H_
+
+#include <stdint.h>
+
+#include "absl/functional/any_invocable.h"
+#include "absl/types/optional.h"
+#include "api/units/timestamp.h"
+#include "video/frame_decode_timing.h"
+
+namespace webrtc {
+
+class FrameDecodeScheduler {
+ public:
+ // Invoked when a frame with `rtp_timestamp` is ready for decoding.
+ using FrameReleaseCallback =
+ absl::AnyInvocable<void(uint32_t rtp_timestamp,
+ Timestamp render_time) &&>;
+
+ virtual ~FrameDecodeScheduler() = default;
+
+ // Returns the rtp timestamp of the next frame scheduled for release, or
+ // `nullopt` if no frame is currently scheduled.
+ virtual absl::optional<uint32_t> ScheduledRtpTimestamp() = 0;
+
+ // Schedules a frame for release based on `schedule`. When released,
+ // `callback` will be invoked with the `rtp` timestamp of the frame and the
+  // `render_time`.
+ virtual void ScheduleFrame(uint32_t rtp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameReleaseCallback callback) = 0;
+
+ // Cancels all scheduled frames.
+ virtual void CancelOutstanding() = 0;
+
+  // Stop() must be called before destruction.
+ virtual void Stop() = 0;
+};
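+
+// A minimal sketch of a conforming implementation (an illustrative
+// assumption, not part of this header's contract): post a delayed task on a
+// TaskQueue that fires at `schedule.latest_decode_time` and then runs
+// `std::move(callback)(rtp, schedule.render_time)`; CancelOutstanding() and
+// Stop() can be realized by invalidating a PendingTaskSafetyFlag.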
+
+} // namespace webrtc
+
+#endif // VIDEO_FRAME_DECODE_SCHEDULER_H_
diff --git a/third_party/libwebrtc/video/frame_decode_scheduler_gn/moz.build b/third_party/libwebrtc/video/frame_decode_scheduler_gn/moz.build
new file mode 100644
index 0000000000..834d9880e7
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_decode_scheduler_gn/moz.build
@@ -0,0 +1,216 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_decode_scheduler_gn")
diff --git a/third_party/libwebrtc/video/frame_decode_timing.cc b/third_party/libwebrtc/video/frame_decode_timing.cc
new file mode 100644
index 0000000000..58ecd41c9e
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_decode_timing.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_decode_timing.h"
+
+#include <algorithm>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+FrameDecodeTiming::FrameDecodeTiming(Clock* clock,
+ webrtc::VCMTiming const* timing)
+ : clock_(clock), timing_(timing) {
+ RTC_DCHECK(clock_);
+ RTC_DCHECK(timing_);
+}
+
+absl::optional<FrameDecodeTiming::FrameSchedule>
+FrameDecodeTiming::OnFrameBufferUpdated(uint32_t next_temporal_unit_rtp,
+ uint32_t last_temporal_unit_rtp,
+ TimeDelta max_wait_for_frame,
+ bool too_many_frames_queued) {
+ RTC_DCHECK_GE(max_wait_for_frame, TimeDelta::Zero());
+ const Timestamp now = clock_->CurrentTime();
+ Timestamp render_time = timing_->RenderTime(next_temporal_unit_rtp, now);
+ TimeDelta max_wait =
+ timing_->MaxWaitingTime(render_time, now, too_many_frames_queued);
+
+  // If the frame is not too far past its decode deadline, or it is the last
+  // decodable frame, it is the best frame to decode. Otherwise, fast-forward
+  // to the next frame in the buffer.
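+  // For example, with kMaxAllowedFrameDelay = 5 ms, a frame whose max_wait is
+  // -7 ms is skipped (nullopt is returned) unless it is the last temporal
+  // unit, in which case it is still scheduled.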
+ if (max_wait <= -kMaxAllowedFrameDelay &&
+ next_temporal_unit_rtp != last_temporal_unit_rtp) {
+ RTC_DLOG(LS_VERBOSE) << "Fast-forwarded frame " << next_temporal_unit_rtp
+ << " render time " << render_time << " with delay "
+ << max_wait;
+ return absl::nullopt;
+ }
+
+ max_wait.Clamp(TimeDelta::Zero(), max_wait_for_frame);
+ RTC_DLOG(LS_VERBOSE) << "Selected frame with rtp " << next_temporal_unit_rtp
+ << " render time " << render_time
+ << " with a max wait of " << max_wait_for_frame
+ << " clamped to " << max_wait;
+ Timestamp latest_decode_time = now + max_wait;
+ return FrameSchedule{.latest_decode_time = latest_decode_time,
+ .render_time = render_time};
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/frame_decode_timing.h b/third_party/libwebrtc/video/frame_decode_timing.h
new file mode 100644
index 0000000000..6bde4702ad
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_decode_timing.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_FRAME_DECODE_TIMING_H_
+#define VIDEO_FRAME_DECODE_TIMING_H_
+
+#include <stdint.h>
+
+#include <functional>
+
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "modules/video_coding/timing/timing.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class FrameDecodeTiming {
+ public:
+ FrameDecodeTiming(Clock* clock, webrtc::VCMTiming const* timing);
+ ~FrameDecodeTiming() = default;
+ FrameDecodeTiming(const FrameDecodeTiming&) = delete;
+ FrameDecodeTiming& operator=(const FrameDecodeTiming&) = delete;
+
+  // Any frame whose decode time lies more than this far in the past can be
+  // fast-forwarded (skipped).
+ static constexpr TimeDelta kMaxAllowedFrameDelay = TimeDelta::Millis(5);
+
+ struct FrameSchedule {
+ Timestamp latest_decode_time;
+ Timestamp render_time;
+ };
+
+ absl::optional<FrameSchedule> OnFrameBufferUpdated(
+ uint32_t next_temporal_unit_rtp,
+ uint32_t last_temporal_unit_rtp,
+ TimeDelta max_wait_for_frame,
+ bool too_many_frames_queued);
+
+ private:
+ Clock* const clock_;
+ webrtc::VCMTiming const* const timing_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_FRAME_DECODE_TIMING_H_
diff --git a/third_party/libwebrtc/video/frame_decode_timing_gn/moz.build b/third_party/libwebrtc/video/frame_decode_timing_gn/moz.build
new file mode 100644
index 0000000000..570b927d3a
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_decode_timing_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/frame_decode_timing.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_decode_timing_gn")
diff --git a/third_party/libwebrtc/video/frame_decode_timing_unittest.cc b/third_party/libwebrtc/video/frame_decode_timing_unittest.cc
new file mode 100644
index 0000000000..83ea91692c
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_decode_timing_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_decode_timing.h"
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/containers/flat_map.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "video/video_receive_stream2.h"
+
+namespace webrtc {
+
+using ::testing::AllOf;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Optional;
+
+namespace {
+
+class FakeVCMTiming : public webrtc::VCMTiming {
+ public:
+ explicit FakeVCMTiming(Clock* clock, const FieldTrialsView& field_trials)
+ : webrtc::VCMTiming(clock, field_trials) {}
+
+ Timestamp RenderTime(uint32_t frame_timestamp, Timestamp now) const override {
+ RTC_DCHECK(render_time_map_.contains(frame_timestamp));
+ auto it = render_time_map_.find(frame_timestamp);
+ return it->second;
+ }
+
+ TimeDelta MaxWaitingTime(Timestamp render_time,
+ Timestamp now,
+ bool too_many_frames_queued) const override {
+ RTC_DCHECK(wait_time_map_.contains(render_time));
+ auto it = wait_time_map_.find(render_time);
+ return it->second;
+ }
+
+ void SetTimes(uint32_t frame_timestamp,
+ Timestamp render_time,
+ TimeDelta max_decode_wait) {
+ render_time_map_.insert_or_assign(frame_timestamp, render_time);
+ wait_time_map_.insert_or_assign(render_time, max_decode_wait);
+ }
+
+ protected:
+ flat_map<uint32_t, Timestamp> render_time_map_;
+ flat_map<Timestamp, TimeDelta> wait_time_map_;
+};
+} // namespace
+
+class FrameDecodeTimingTest : public ::testing::Test {
+ public:
+ FrameDecodeTimingTest()
+ : clock_(Timestamp::Millis(1000)),
+ timing_(&clock_, field_trials_),
+ frame_decode_scheduler_(&clock_, &timing_) {}
+
+ protected:
+ test::ScopedKeyValueConfig field_trials_;
+ SimulatedClock clock_;
+ FakeVCMTiming timing_;
+ FrameDecodeTiming frame_decode_scheduler_;
+};
+
+TEST_F(FrameDecodeTimingTest, ReturnsWaitTimesWhenValid) {
+ const TimeDelta decode_delay = TimeDelta::Millis(42);
+ const Timestamp render_time = clock_.CurrentTime() + TimeDelta::Millis(60);
+ timing_.SetTimes(90000, render_time, decode_delay);
+
+ EXPECT_THAT(frame_decode_scheduler_.OnFrameBufferUpdated(
+ 90000, 180000, kMaxWaitForFrame, false),
+ Optional(AllOf(
+ Field(&FrameDecodeTiming::FrameSchedule::latest_decode_time,
+ Eq(clock_.CurrentTime() + decode_delay)),
+ Field(&FrameDecodeTiming::FrameSchedule::render_time,
+ Eq(render_time)))));
+}
+
+TEST_F(FrameDecodeTimingTest, FastForwardsFrameTooFarInThePast) {
+ const TimeDelta decode_delay =
+ -FrameDecodeTiming::kMaxAllowedFrameDelay - TimeDelta::Millis(1);
+ const Timestamp render_time = clock_.CurrentTime();
+ timing_.SetTimes(90000, render_time, decode_delay);
+
+ EXPECT_THAT(frame_decode_scheduler_.OnFrameBufferUpdated(
+ 90000, 180000, kMaxWaitForFrame, false),
+ Eq(absl::nullopt));
+}
+
+TEST_F(FrameDecodeTimingTest, NoFastForwardIfOnlyFrameToDecode) {
+ const TimeDelta decode_delay =
+ -FrameDecodeTiming::kMaxAllowedFrameDelay - TimeDelta::Millis(1);
+ const Timestamp render_time = clock_.CurrentTime();
+ timing_.SetTimes(90000, render_time, decode_delay);
+
+ // Negative `decode_delay` means that `latest_decode_time` is now.
+ EXPECT_THAT(frame_decode_scheduler_.OnFrameBufferUpdated(
+ 90000, 90000, kMaxWaitForFrame, false),
+ Optional(AllOf(
+ Field(&FrameDecodeTiming::FrameSchedule::latest_decode_time,
+ Eq(clock_.CurrentTime())),
+ Field(&FrameDecodeTiming::FrameSchedule::render_time,
+ Eq(render_time)))));
+}
+
+TEST_F(FrameDecodeTimingTest, MaxWaitCapped) {
+ TimeDelta frame_delay = TimeDelta::Millis(30);
+ const TimeDelta decode_delay = TimeDelta::Seconds(3);
+ const Timestamp render_time = clock_.CurrentTime() + TimeDelta::Seconds(3);
+ timing_.SetTimes(90000, render_time, decode_delay);
+ timing_.SetTimes(180000, render_time + frame_delay,
+ decode_delay + frame_delay);
+
+ EXPECT_THAT(frame_decode_scheduler_.OnFrameBufferUpdated(
+ 90000, 270000, kMaxWaitForFrame, false),
+ Optional(AllOf(
+ Field(&FrameDecodeTiming::FrameSchedule::latest_decode_time,
+ Eq(clock_.CurrentTime() + kMaxWaitForFrame)),
+ Field(&FrameDecodeTiming::FrameSchedule::render_time,
+ Eq(render_time)))));
+
+  // Test that the cap also applies to a keyframe.
+ clock_.AdvanceTime(frame_delay);
+ EXPECT_THAT(frame_decode_scheduler_.OnFrameBufferUpdated(
+ 180000, 270000, kMaxWaitForKeyFrame, false),
+ Optional(AllOf(
+ Field(&FrameDecodeTiming::FrameSchedule::latest_decode_time,
+ Eq(clock_.CurrentTime() + kMaxWaitForKeyFrame)),
+ Field(&FrameDecodeTiming::FrameSchedule::render_time,
+ Eq(render_time + frame_delay)))));
+}
+
+} // namespace webrtc
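
One further case falls out of this fixture almost for free. As an editorial sketch (not part of the upstream file), a test that the schedule tracks the timing-provided wait when it is below the cap could reuse only names already shown above:

TEST_F(FrameDecodeTimingTest, UsesTimingWaitWhenBelowCap) {
  // A 10 ms decode delay stays well under kMaxWaitForFrame, so the
  // returned latest_decode_time should be now + decode_delay, not the cap.
  const TimeDelta decode_delay = TimeDelta::Millis(10);
  const Timestamp render_time = clock_.CurrentTime() + TimeDelta::Millis(30);
  timing_.SetTimes(90000, render_time, decode_delay);

  EXPECT_THAT(
      frame_decode_scheduler_.OnFrameBufferUpdated(90000, 180000,
                                                   kMaxWaitForFrame, false),
      Optional(Field(&FrameDecodeTiming::FrameSchedule::latest_decode_time,
                     Eq(clock_.CurrentTime() + decode_delay))));
}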
diff --git a/third_party/libwebrtc/video/frame_dumping_decoder.cc b/third_party/libwebrtc/video/frame_dumping_decoder.cc
new file mode 100644
index 0000000000..9592565893
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_dumping_decoder.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_dumping_decoder.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+
+namespace webrtc {
+namespace {
+
+class FrameDumpingDecoder : public VideoDecoder {
+ public:
+ FrameDumpingDecoder(std::unique_ptr<VideoDecoder> decoder, FileWrapper file);
+ ~FrameDumpingDecoder() override;
+
+ bool Configure(const Settings& settings) override;
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override;
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override;
+ int32_t Release() override;
+ DecoderInfo GetDecoderInfo() const override;
+ const char* ImplementationName() const override;
+
+ private:
+ std::unique_ptr<VideoDecoder> decoder_;
+ VideoCodecType codec_type_ = VideoCodecType::kVideoCodecGeneric;
+ std::unique_ptr<IvfFileWriter> writer_;
+};
+
+FrameDumpingDecoder::FrameDumpingDecoder(std::unique_ptr<VideoDecoder> decoder,
+ FileWrapper file)
+ : decoder_(std::move(decoder)),
+ writer_(IvfFileWriter::Wrap(std::move(file),
+ /* byte_limit= */ 100000000)) {}
+
+FrameDumpingDecoder::~FrameDumpingDecoder() = default;
+
+bool FrameDumpingDecoder::Configure(const Settings& settings) {
+ codec_type_ = settings.codec_type();
+ return decoder_->Configure(settings);
+}
+
+int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) {
+ int32_t ret = decoder_->Decode(input_image, missing_frames, render_time_ms);
+ writer_->WriteFrame(input_image, codec_type_);
+
+ return ret;
+}
+
+int32_t FrameDumpingDecoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ return decoder_->RegisterDecodeCompleteCallback(callback);
+}
+
+int32_t FrameDumpingDecoder::Release() {
+ return decoder_->Release();
+}
+
+VideoDecoder::DecoderInfo FrameDumpingDecoder::GetDecoderInfo() const {
+ return decoder_->GetDecoderInfo();
+}
+
+const char* FrameDumpingDecoder::ImplementationName() const {
+ return decoder_->ImplementationName();
+}
+
+} // namespace
+
+std::unique_ptr<VideoDecoder> CreateFrameDumpingDecoderWrapper(
+ std::unique_ptr<VideoDecoder> decoder,
+ FileWrapper file) {
+ return std::make_unique<FrameDumpingDecoder>(std::move(decoder),
+ std::move(file));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/frame_dumping_decoder.h b/third_party/libwebrtc/video/frame_dumping_decoder.h
new file mode 100644
index 0000000000..3a97c8bc61
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_dumping_decoder.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_FRAME_DUMPING_DECODER_H_
+#define VIDEO_FRAME_DUMPING_DECODER_H_
+
+#include <memory>
+
+#include "api/video_codecs/video_decoder.h"
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+
+// Creates a decoder wrapper that writes the encoded frames to an IVF file.
+std::unique_ptr<VideoDecoder> CreateFrameDumpingDecoderWrapper(
+ std::unique_ptr<VideoDecoder> decoder,
+ FileWrapper file);
+
+} // namespace webrtc
+
+#endif // VIDEO_FRAME_DUMPING_DECODER_H_
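
A minimal usage sketch for this factory (the call site is hypothetical: CreateDecoder() stands in for however the inner decoder is obtained, and the output path is illustrative; FileWrapper::OpenWriteOnly comes from rtc_base/system/file_wrapper.h):

// Wrap an existing decoder so that every encoded frame passed to
// Decode() is also appended to an IVF dump file.
std::unique_ptr<webrtc::VideoDecoder> inner = CreateDecoder();  // assumed helper
std::unique_ptr<webrtc::VideoDecoder> dumping =
    webrtc::CreateFrameDumpingDecoderWrapper(
        std::move(inner),
        webrtc::FileWrapper::OpenWriteOnly("received_stream.ivf"));
// From here on, `dumping` is used exactly like the wrapped decoder.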
diff --git a/third_party/libwebrtc/video/frame_dumping_decoder_gn/moz.build b/third_party/libwebrtc/video/frame_dumping_decoder_gn/moz.build
new file mode 100644
index 0000000000..3266e8f07f
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_dumping_decoder_gn/moz.build
@@ -0,0 +1,233 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/frame_dumping_decoder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("frame_dumping_decoder_gn")
diff --git a/third_party/libwebrtc/video/frame_encode_metadata_writer.cc b/third_party/libwebrtc/video/frame_encode_metadata_writer.cc
new file mode 100644
index 0000000000..d6095a090b
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_encode_metadata_writer.cc
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_encode_metadata_writer.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "common_video/h264/sps_vui_rewriter.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+namespace {
+const int kMessagesThrottlingThreshold = 2;
+const int kThrottleRatio = 100000;
+
+class EncodedImageBufferWrapper : public EncodedImageBufferInterface {
+ public:
+ explicit EncodedImageBufferWrapper(rtc::Buffer&& buffer)
+ : buffer_(std::move(buffer)) {}
+
+ const uint8_t* data() const override { return buffer_.data(); }
+ uint8_t* data() override { return buffer_.data(); }
+ size_t size() const override { return buffer_.size(); }
+
+ private:
+ rtc::Buffer buffer_;
+};
+
+} // namespace
+
+FrameEncodeMetadataWriter::TimingFramesLayerInfo::TimingFramesLayerInfo() =
+ default;
+FrameEncodeMetadataWriter::TimingFramesLayerInfo::~TimingFramesLayerInfo() =
+ default;
+
+FrameEncodeMetadataWriter::FrameEncodeMetadataWriter(
+ EncodedImageCallback* frame_drop_callback)
+ : frame_drop_callback_(frame_drop_callback),
+ framerate_fps_(0),
+ last_timing_frame_time_ms_(-1),
+ reordered_frames_logged_messages_(0),
+ stalled_encoder_logged_messages_(0) {
+ codec_settings_.timing_frame_thresholds = {-1, 0};
+}
+FrameEncodeMetadataWriter::~FrameEncodeMetadataWriter() {}
+
+void FrameEncodeMetadataWriter::OnEncoderInit(const VideoCodec& codec) {
+ MutexLock lock(&lock_);
+ codec_settings_ = codec;
+ size_t num_spatial_layers = codec_settings_.numberOfSimulcastStreams;
+ if (codec_settings_.codecType == kVideoCodecVP9) {
+ num_spatial_layers = std::max(
+ num_spatial_layers,
+ static_cast<size_t>(codec_settings_.VP9()->numberOfSpatialLayers));
+ } else if (codec_settings_.codecType == kVideoCodecAV1 &&
+ codec_settings_.GetScalabilityMode().has_value()) {
+ std::unique_ptr<ScalableVideoController> structure =
+ CreateScalabilityStructure(*codec_settings_.GetScalabilityMode());
+ if (structure) {
+ num_spatial_layers = structure->StreamConfig().num_spatial_layers;
+ } else {
+      // |structure| may be nullptr if the scalability mode is invalid.
+ RTC_LOG(LS_WARNING) << "Cannot create ScalabilityStructure, since the "
+ "scalability mode is invalid";
+ }
+ }
+ num_spatial_layers_ = std::max(num_spatial_layers, size_t{1});
+}
+
+void FrameEncodeMetadataWriter::OnSetRates(
+ const VideoBitrateAllocation& bitrate_allocation,
+ uint32_t framerate_fps) {
+ MutexLock lock(&lock_);
+ framerate_fps_ = framerate_fps;
+ if (timing_frames_info_.size() < num_spatial_layers_) {
+ timing_frames_info_.resize(num_spatial_layers_);
+ }
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ timing_frames_info_[i].target_bitrate_bytes_per_sec =
+ bitrate_allocation.GetSpatialLayerSum(i) / 8;
+ }
+}
+
+void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
+ MutexLock lock(&lock_);
+
+ timing_frames_info_.resize(num_spatial_layers_);
+ FrameMetadata metadata;
+ metadata.rtp_timestamp = frame.timestamp();
+ metadata.encode_start_time_ms = rtc::TimeMillis();
+ metadata.ntp_time_ms = frame.ntp_time_ms();
+ metadata.timestamp_us = frame.timestamp_us();
+ metadata.rotation = frame.rotation();
+ metadata.color_space = frame.color_space();
+ metadata.packet_infos = frame.packet_infos();
+ for (size_t si = 0; si < num_spatial_layers_; ++si) {
+ RTC_DCHECK(timing_frames_info_[si].frames.empty() ||
+ rtc::TimeDiff(
+ frame.render_time_ms(),
+ timing_frames_info_[si].frames.back().timestamp_us / 1000) >=
+ 0);
+    // If a stream is disabled due to low bandwidth, OnEncodeStarted will
+    // still be called and has to be ignored.
+ if (timing_frames_info_[si].target_bitrate_bytes_per_sec == 0)
+ continue;
+ if (timing_frames_info_[si].frames.size() == kMaxEncodeStartTimeListSize) {
+ ++stalled_encoder_logged_messages_;
+ if (stalled_encoder_logged_messages_ <= kMessagesThrottlingThreshold ||
+ stalled_encoder_logged_messages_ % kThrottleRatio == 0) {
+ RTC_LOG(LS_WARNING) << "Too many frames in the encode_start_list."
+ " Did encoder stall?";
+ if (stalled_encoder_logged_messages_ == kMessagesThrottlingThreshold) {
+ RTC_LOG(LS_WARNING)
+ << "Too many log messages. Further stalled encoder"
+ "warnings will be throttled.";
+ }
+ }
+ frame_drop_callback_->OnDroppedFrame(
+ EncodedImageCallback::DropReason::kDroppedByEncoder);
+ timing_frames_info_[si].frames.pop_front();
+ }
+ timing_frames_info_[si].frames.emplace_back(metadata);
+ }
+}
+
+void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
+ EncodedImage* encoded_image) {
+ MutexLock lock(&lock_);
+ absl::optional<size_t> outlier_frame_size;
+ absl::optional<int64_t> encode_start_ms;
+ uint8_t timing_flags = VideoSendTiming::kNotTriggered;
+
+ int64_t encode_done_ms = rtc::TimeMillis();
+
+ encode_start_ms =
+ ExtractEncodeStartTimeAndFillMetadata(simulcast_svc_idx, encoded_image);
+
+ if (timing_frames_info_.size() > simulcast_svc_idx) {
+ size_t target_bitrate =
+ timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec;
+ if (framerate_fps_ > 0 && target_bitrate > 0) {
+      // Framerate and target bitrate were reported by the encoder.
+ size_t average_frame_size = target_bitrate / framerate_fps_;
+ outlier_frame_size.emplace(
+ average_frame_size *
+ codec_settings_.timing_frame_thresholds.outlier_ratio_percent / 100);
+ }
+ }
+
+ // Outliers trigger timing frames, but do not affect scheduled timing
+ // frames.
+ if (outlier_frame_size && encoded_image->size() >= *outlier_frame_size) {
+ timing_flags |= VideoSendTiming::kTriggeredBySize;
+ }
+
+ // Check if it's time to send a timing frame.
+ int64_t timing_frame_delay_ms =
+ encoded_image->capture_time_ms_ - last_timing_frame_time_ms_;
+  // Trigger the threshold if this is the first frame, if too long has passed
+  // since the last timing frame, or if we already sent a timing frame on a
+  // different simulcast stream with the same capture time.
+ if (last_timing_frame_time_ms_ == -1 ||
+ timing_frame_delay_ms >=
+ codec_settings_.timing_frame_thresholds.delay_ms ||
+ timing_frame_delay_ms == 0) {
+ timing_flags |= VideoSendTiming::kTriggeredByTimer;
+ last_timing_frame_time_ms_ = encoded_image->capture_time_ms_;
+ }
+
+  // If the encode start time is not available, the encoder uses an internal
+  // source. In that case the capture timestamp may come from a different
+  // clock with a drift relative to rtc::TimeMillis(). We can't use it for
+  // timing frames, because the capture time sent on the network is required
+  // to be less than all the other timestamps.
+ if (encode_start_ms) {
+ encoded_image->SetEncodeTime(*encode_start_ms, encode_done_ms);
+ encoded_image->timing_.flags = timing_flags;
+ } else {
+ encoded_image->timing_.flags = VideoSendTiming::kInvalid;
+ }
+}
+
+void FrameEncodeMetadataWriter::UpdateBitstream(
+ const CodecSpecificInfo* codec_specific_info,
+ EncodedImage* encoded_image) {
+ if (!codec_specific_info ||
+ codec_specific_info->codecType != kVideoCodecH264 ||
+ encoded_image->_frameType != VideoFrameType::kVideoFrameKey) {
+ return;
+ }
+
+ // Make sure that the data is not copied if owned by EncodedImage.
+ const EncodedImage& buffer = *encoded_image;
+ rtc::Buffer modified_buffer =
+ SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite(
+ buffer, encoded_image->ColorSpace());
+
+ encoded_image->SetEncodedData(
+ rtc::make_ref_counted<EncodedImageBufferWrapper>(
+ std::move(modified_buffer)));
+}
+
+void FrameEncodeMetadataWriter::Reset() {
+ MutexLock lock(&lock_);
+ for (auto& info : timing_frames_info_) {
+ info.frames.clear();
+ }
+ last_timing_frame_time_ms_ = -1;
+ reordered_frames_logged_messages_ = 0;
+ stalled_encoder_logged_messages_ = 0;
+}
+
+absl::optional<int64_t>
+FrameEncodeMetadataWriter::ExtractEncodeStartTimeAndFillMetadata(
+ size_t simulcast_svc_idx,
+ EncodedImage* encoded_image) {
+ absl::optional<int64_t> result;
+ size_t num_simulcast_svc_streams = timing_frames_info_.size();
+ if (simulcast_svc_idx < num_simulcast_svc_streams) {
+ auto metadata_list = &timing_frames_info_[simulcast_svc_idx].frames;
+    // Skip frames for which there was an OnEncodeStarted but no
+    // OnEncodedImage call. These are dropped by the encoder internally.
+    // Because some hardware encoders don't preserve the capture timestamp,
+    // we use RTP timestamps here.
+ while (!metadata_list->empty() &&
+ IsNewerTimestamp(encoded_image->Timestamp(),
+ metadata_list->front().rtp_timestamp)) {
+ frame_drop_callback_->OnDroppedFrame(
+ EncodedImageCallback::DropReason::kDroppedByEncoder);
+ metadata_list->pop_front();
+ }
+
+ encoded_image->content_type_ =
+ (codec_settings_.mode == VideoCodecMode::kScreensharing)
+ ? VideoContentType::SCREENSHARE
+ : VideoContentType::UNSPECIFIED;
+
+ if (!metadata_list->empty() &&
+ metadata_list->front().rtp_timestamp == encoded_image->Timestamp()) {
+ result.emplace(metadata_list->front().encode_start_time_ms);
+ encoded_image->capture_time_ms_ =
+ metadata_list->front().timestamp_us / 1000;
+ encoded_image->ntp_time_ms_ = metadata_list->front().ntp_time_ms;
+ encoded_image->rotation_ = metadata_list->front().rotation;
+ encoded_image->SetColorSpace(metadata_list->front().color_space);
+ encoded_image->SetPacketInfos(metadata_list->front().packet_infos);
+ metadata_list->pop_front();
+ } else {
+ ++reordered_frames_logged_messages_;
+ if (reordered_frames_logged_messages_ <= kMessagesThrottlingThreshold ||
+ reordered_frames_logged_messages_ % kThrottleRatio == 0) {
+ RTC_LOG(LS_WARNING) << "Frame with no encode started time recordings. "
+ "Encoder may be reordering frames "
+ "or not preserving RTP timestamps.";
+ if (reordered_frames_logged_messages_ == kMessagesThrottlingThreshold) {
+ RTC_LOG(LS_WARNING) << "Too many log messages. Further frames "
+ "reordering warnings will be throttled.";
+ }
+ }
+ }
+ }
+ return result;
+}
+
+} // namespace webrtc
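
To make the outlier arithmetic in FillTimingInfo() concrete, here is a worked example with illustrative numbers (the 500% ratio is an assumed threshold, standing in for whatever timing_frame_thresholds.outlier_ratio_percent is configured to):

// Rates reported via OnSetRates(): 150000 bytes/s target at 30 fps.
size_t target_bitrate_bytes_per_sec = 150000;
uint32_t framerate_fps = 30;
// Expected average frame size: 150000 / 30 = 5000 bytes.
size_t average_frame_size = target_bitrate_bytes_per_sec / framerate_fps;
// With outlier_ratio_percent = 500, any encoded frame of 25000 bytes or
// more gets VideoSendTiming::kTriggeredBySize set in its timing flags.
size_t outlier_frame_size = average_frame_size * 500 / 100;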
diff --git a/third_party/libwebrtc/video/frame_encode_metadata_writer.h b/third_party/libwebrtc/video/frame_encode_metadata_writer.h
new file mode 100644
index 0000000000..afebca816c
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_encode_metadata_writer.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
+#define VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
+
+#include <list>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class FrameEncodeMetadataWriter {
+ public:
+ explicit FrameEncodeMetadataWriter(EncodedImageCallback* frame_drop_callback);
+ ~FrameEncodeMetadataWriter();
+
+ void OnEncoderInit(const VideoCodec& codec);
+ void OnSetRates(const VideoBitrateAllocation& bitrate_allocation,
+ uint32_t framerate_fps);
+
+ void OnEncodeStarted(const VideoFrame& frame);
+
+ void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image);
+
+ void UpdateBitstream(const CodecSpecificInfo* codec_specific_info,
+ EncodedImage* encoded_image);
+
+ void Reset();
+
+ private:
+  // For non-internal-source encoders, returns the encode start time and
+  // fixes the frame's capture timestamp if it was corrupted by the encoder.
+ absl::optional<int64_t> ExtractEncodeStartTimeAndFillMetadata(
+ size_t simulcast_svc_idx,
+ EncodedImage* encoded_image) RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ struct FrameMetadata {
+ uint32_t rtp_timestamp;
+ int64_t encode_start_time_ms;
+ int64_t ntp_time_ms = 0;
+ int64_t timestamp_us = 0;
+ VideoRotation rotation = kVideoRotation_0;
+ absl::optional<ColorSpace> color_space;
+ RtpPacketInfos packet_infos;
+ };
+ struct TimingFramesLayerInfo {
+ TimingFramesLayerInfo();
+ ~TimingFramesLayerInfo();
+ size_t target_bitrate_bytes_per_sec = 0;
+ std::list<FrameMetadata> frames;
+ };
+
+ Mutex lock_;
+ EncodedImageCallback* const frame_drop_callback_;
+ VideoCodec codec_settings_ RTC_GUARDED_BY(&lock_);
+ uint32_t framerate_fps_ RTC_GUARDED_BY(&lock_);
+
+ size_t num_spatial_layers_ RTC_GUARDED_BY(&lock_);
+ // Separate instance for each simulcast stream or spatial layer.
+ std::vector<TimingFramesLayerInfo> timing_frames_info_ RTC_GUARDED_BY(&lock_);
+ int64_t last_timing_frame_time_ms_ RTC_GUARDED_BY(&lock_);
+ size_t reordered_frames_logged_messages_ RTC_GUARDED_BY(&lock_);
+ size_t stalled_encoder_logged_messages_ RTC_GUARDED_BY(&lock_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
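
The expected call order is easiest to see in one place. This sketch mirrors the unit tests that follow; `sink`, `frame`, and `image` are placeholders for an EncodedImageCallback, an input VideoFrame, and an output EncodedImage:

FrameEncodeMetadataWriter writer(&sink);
writer.OnEncoderInit(VideoCodec());            // 1. Codec settings first.
VideoBitrateAllocation allocation;
allocation.SetBitrate(0, 0, 500000);           // Non-zero rate before frame 1.
writer.OnSetRates(allocation, /*framerate_fps=*/30);
writer.OnEncodeStarted(frame);                 // 2. Per input frame.
writer.FillTimingInfo(/*simulcast_svc_idx=*/0, &image);  // 3. Per encoded image.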
diff --git a/third_party/libwebrtc/video/frame_encode_metadata_writer_unittest.cc b/third_party/libwebrtc/video/frame_encode_metadata_writer_unittest.cc
new file mode 100644
index 0000000000..e151282b77
--- /dev/null
+++ b/third_party/libwebrtc/video/frame_encode_metadata_writer_unittest.cc
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/frame_encode_metadata_writer.h"
+
+#include <cstddef>
+#include <vector>
+
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_timing.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/test/utilities.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+const rtc::scoped_refptr<I420Buffer> kFrameBuffer = I420Buffer::Create(4, 4);
+
+inline size_t FrameSize(const size_t& min_frame_size,
+ const size_t& max_frame_size,
+ const int& s,
+ const int& i) {
+ return min_frame_size + (s + 1) * i % (max_frame_size - min_frame_size);
+}
+
+class FakeEncodedImageCallback : public EncodedImageCallback {
+ public:
+ FakeEncodedImageCallback() : num_frames_dropped_(0) {}
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ return Result(Result::OK);
+ }
+ void OnDroppedFrame(DropReason reason) override { ++num_frames_dropped_; }
+ size_t GetNumFramesDropped() { return num_frames_dropped_; }
+
+ private:
+ size_t num_frames_dropped_;
+};
+
+enum class FrameType {
+ kNormal,
+ kTiming,
+ kDropped,
+};
+
+bool IsTimingFrame(const EncodedImage& image) {
+ return image.timing_.flags != VideoSendTiming::kInvalid &&
+ image.timing_.flags != VideoSendTiming::kNotTriggered;
+}
+
+// Emulates `num_frames` frames on `num_streams` streams, with capture
+// timestamps increasing by 1 from 0. The size of each frame is between
+// `min_frame_size` and `max_frame_size`; outliers are counted relative to
+// `average_frame_sizes[]` for each stream.
+std::vector<std::vector<FrameType>> GetTimingFrames(
+ const int64_t delay_ms,
+ const size_t min_frame_size,
+ const size_t max_frame_size,
+ std::vector<size_t> average_frame_sizes,
+ const int num_streams,
+ const int num_frames) {
+ FakeEncodedImageCallback sink;
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ VideoCodec codec_settings;
+ codec_settings.numberOfSimulcastStreams = num_streams;
+ codec_settings.timing_frame_thresholds = {delay_ms,
+ kDefaultOutlierFrameSizePercent};
+ encode_timer.OnEncoderInit(codec_settings);
+ const size_t kFramerate = 30;
+ VideoBitrateAllocation bitrate_allocation;
+ for (int si = 0; si < num_streams; ++si) {
+ bitrate_allocation.SetBitrate(si, 0,
+ average_frame_sizes[si] * 8 * kFramerate);
+ }
+ encode_timer.OnSetRates(bitrate_allocation, kFramerate);
+
+ std::vector<std::vector<FrameType>> result(num_streams);
+ int64_t current_timestamp = 0;
+ for (int i = 0; i < num_frames; ++i) {
+ current_timestamp += 1;
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_rtp(current_timestamp * 90)
+ .set_timestamp_ms(current_timestamp)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ for (int si = 0; si < num_streams; ++si) {
+      // Every (5+si)-th frame is dropped on the si-th stream by design.
+ bool dropped = i % (5 + si) == 0;
+
+ EncodedImage image;
+ image.SetEncodedData(EncodedImageBuffer::Create(max_frame_size));
+ image.set_size(FrameSize(min_frame_size, max_frame_size, si, i));
+ image.capture_time_ms_ = current_timestamp;
+ image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
+ image.SetSpatialIndex(si);
+
+ if (dropped) {
+ result[si].push_back(FrameType::kDropped);
+ continue;
+ }
+
+ encode_timer.FillTimingInfo(si, &image);
+
+ if (IsTimingFrame(image)) {
+ result[si].push_back(FrameType::kTiming);
+ } else {
+ result[si].push_back(FrameType::kNormal);
+ }
+ }
+ }
+ return result;
+}
+} // namespace
+
+TEST(FrameEncodeMetadataWriterTest, MarksTimingFramesPeriodicallyTogether) {
+ const int64_t kDelayMs = 29;
+ const size_t kMinFrameSize = 10;
+ const size_t kMaxFrameSize = 20;
+ const int kNumFrames = 1000;
+ const int kNumStreams = 3;
+  // No outliers, as 1000 is larger than anything in the range [10, 20].
+ const std::vector<size_t> kAverageSize = {1000, 1000, 1000};
+ auto frames = GetTimingFrames(kDelayMs, kMinFrameSize, kMaxFrameSize,
+ kAverageSize, kNumStreams, kNumFrames);
+  // Timing frames should be triggered every `delay_ms`.
+ // As no outliers are expected, frames on all streams have to be
+ // marked together.
+ int last_timing_frame = -1;
+ for (int i = 0; i < kNumFrames; ++i) {
+ int num_normal = 0;
+ int num_timing = 0;
+ int num_dropped = 0;
+ for (int s = 0; s < kNumStreams; ++s) {
+ if (frames[s][i] == FrameType::kTiming) {
+ ++num_timing;
+ } else if (frames[s][i] == FrameType::kNormal) {
+ ++num_normal;
+ } else {
+ ++num_dropped;
+ }
+ }
+    // Can't have both normal and timing frames at the same timestamp.
+ EXPECT_TRUE(num_timing == 0 || num_normal == 0);
+ if (num_dropped < kNumStreams) {
+ if (last_timing_frame == -1 || i >= last_timing_frame + kDelayMs) {
+        // If there were no timing frames for a while, the currently sent
+        // frame has to be a timing frame. No normal frames should be sent.
+ EXPECT_EQ(num_normal, 0);
+ } else {
+ // No unneeded timing frames should be sent.
+ EXPECT_EQ(num_timing, 0);
+ }
+ }
+ if (num_timing > 0)
+ last_timing_frame = i;
+ }
+}
+
+TEST(FrameEncodeMetadataWriterTest, MarksOutliers) {
+ const int64_t kDelayMs = 29;
+ const size_t kMinFrameSize = 2495;
+ const size_t kMaxFrameSize = 2505;
+ const int kNumFrames = 1000;
+ const int kNumStreams = 3;
+  // Possible outliers, as 1000 lies in the range [995, 1005].
+ const std::vector<size_t> kAverageSize = {998, 1000, 1004};
+ auto frames = GetTimingFrames(kDelayMs, kMinFrameSize, kMaxFrameSize,
+ kAverageSize, kNumStreams, kNumFrames);
+ // All outliers should be marked.
+ for (int i = 0; i < kNumFrames; ++i) {
+ for (int s = 0; s < kNumStreams; ++s) {
+ if (FrameSize(kMinFrameSize, kMaxFrameSize, s, i) >=
+ kAverageSize[s] * kDefaultOutlierFrameSizePercent / 100) {
+        // Frame is too big. It may be dropped or timing, but not normal.
+ EXPECT_NE(frames[s][i], FrameType::kNormal);
+ }
+ }
+ }
+}
+
+TEST(FrameEncodeMetadataWriterTest, NoTimingFrameIfNoEncodeStartTime) {
+ int64_t timestamp = 1;
+ constexpr size_t kFrameSize = 500;
+ EncodedImage image;
+ image.SetEncodedData(EncodedImageBuffer::Create(kFrameSize));
+ image.capture_time_ms_ = timestamp;
+ image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
+
+ FakeEncodedImageCallback sink;
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ VideoCodec codec_settings;
+ // Make all frames timing frames.
+ codec_settings.timing_frame_thresholds.delay_ms = 1;
+ encode_timer.OnEncoderInit(codec_settings);
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ // Verify a single frame works with encode start time set.
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_ms(timestamp)
+ .set_timestamp_rtp(timestamp * 90)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_TRUE(IsTimingFrame(image));
+
+  // New frame, now skip OnEncodeStarted. Should not result in a timing frame.
+ image.capture_time_ms_ = ++timestamp;
+ image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
+ image.timing_ = EncodedImage::Timing();
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_FALSE(IsTimingFrame(image));
+}
+
+TEST(FrameEncodeMetadataWriterTest, NotifiesAboutDroppedFrames) {
+ const int64_t kTimestampMs1 = 47721840;
+ const int64_t kTimestampMs2 = 47721850;
+ const int64_t kTimestampMs3 = 47721860;
+ const int64_t kTimestampMs4 = 47721870;
+
+ FakeEncodedImageCallback sink;
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ encode_timer.OnEncoderInit(VideoCodec());
+  // A non-zero bitrate has to be set before the first frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ EncodedImage image;
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_rtp(kTimestampMs1 * 90)
+ .set_timestamp_ms(kTimestampMs1)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+
+ image.capture_time_ms_ = kTimestampMs1;
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ frame.set_timestamp(image.capture_time_ms_ * 90);
+ frame.set_timestamp_us(image.capture_time_ms_ * 1000);
+ encode_timer.OnEncodeStarted(frame);
+
+ EXPECT_EQ(0u, sink.GetNumFramesDropped());
+ encode_timer.FillTimingInfo(0, &image);
+
+ image.capture_time_ms_ = kTimestampMs2;
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ image.timing_ = EncodedImage::Timing();
+ frame.set_timestamp(image.capture_time_ms_ * 90);
+ frame.set_timestamp_us(image.capture_time_ms_ * 1000);
+ encode_timer.OnEncodeStarted(frame);
+  // No OnEncodedImage call for timestamp2. Yet, at this moment it is not
+  // known that the frame with timestamp2 was dropped.
+ EXPECT_EQ(0u, sink.GetNumFramesDropped());
+
+ image.capture_time_ms_ = kTimestampMs3;
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ image.timing_ = EncodedImage::Timing();
+ frame.set_timestamp(image.capture_time_ms_ * 90);
+ frame.set_timestamp_us(image.capture_time_ms_ * 1000);
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_EQ(1u, sink.GetNumFramesDropped());
+
+ image.capture_time_ms_ = kTimestampMs4;
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ image.timing_ = EncodedImage::Timing();
+ frame.set_timestamp(image.capture_time_ms_ * 90);
+ frame.set_timestamp_us(image.capture_time_ms_ * 1000);
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_EQ(1u, sink.GetNumFramesDropped());
+}
+
+TEST(FrameEncodeMetadataWriterTest, RestoresCaptureTimestamps) {
+ EncodedImage image;
+ const int64_t kTimestampMs = 123456;
+ FakeEncodedImageCallback sink;
+
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ encode_timer.OnEncoderInit(VideoCodec());
+  // A non-zero bitrate has to be set before the first frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ image.capture_time_ms_ = kTimestampMs; // Correct timestamp.
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_ms(image.capture_time_ms_)
+ .set_timestamp_rtp(image.capture_time_ms_ * 90)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ image.capture_time_ms_ = 0; // Incorrect timestamp.
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_EQ(kTimestampMs, image.capture_time_ms_);
+}
+
+TEST(FrameEncodeMetadataWriterTest, CopiesRotation) {
+ EncodedImage image;
+ const int64_t kTimestampMs = 123456;
+ FakeEncodedImageCallback sink;
+
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ encode_timer.OnEncoderInit(VideoCodec());
+  // A non-zero bitrate has to be set before the first frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_ms(kTimestampMs)
+ .set_timestamp_rtp(kTimestampMs * 90)
+ .set_rotation(kVideoRotation_180)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_EQ(kVideoRotation_180, image.rotation_);
+}
+
+TEST(FrameEncodeMetadataWriterTest, SetsContentType) {
+ EncodedImage image;
+ const int64_t kTimestampMs = 123456;
+ FakeEncodedImageCallback sink;
+
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ VideoCodec codec;
+ codec.mode = VideoCodecMode::kScreensharing;
+ encode_timer.OnEncoderInit(codec);
+  // A non-zero bitrate has to be set before the first frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_ms(kTimestampMs)
+ .set_timestamp_rtp(kTimestampMs * 90)
+ .set_rotation(kVideoRotation_180)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, image.content_type_);
+}
+
+TEST(FrameEncodeMetadataWriterTest, CopiesColorSpace) {
+ EncodedImage image;
+ const int64_t kTimestampMs = 123456;
+ FakeEncodedImageCallback sink;
+
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ encode_timer.OnEncoderInit(VideoCodec());
+  // A non-zero bitrate has to be set before the first frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ webrtc::ColorSpace color_space =
+ CreateTestColorSpace(/*with_hdr_metadata=*/true);
+ image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_ms(kTimestampMs)
+ .set_timestamp_rtp(kTimestampMs * 90)
+ .set_color_space(color_space)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ ASSERT_NE(image.ColorSpace(), nullptr);
+ EXPECT_EQ(color_space, *image.ColorSpace());
+}
+
+TEST(FrameEncodeMetadataWriterTest, CopiesPacketInfos) {
+ EncodedImage image;
+ const int64_t kTimestampMs = 123456;
+ FakeEncodedImageCallback sink;
+
+ FrameEncodeMetadataWriter encode_timer(&sink);
+ encode_timer.OnEncoderInit(VideoCodec());
+  // A non-zero bitrate has to be set before the first frame.
+ VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation.SetBitrate(0, 0, 500000);
+ encode_timer.OnSetRates(bitrate_allocation, 30);
+
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
+ VideoFrame frame = VideoFrame::Builder()
+ .set_timestamp_ms(kTimestampMs)
+ .set_timestamp_rtp(kTimestampMs * 90)
+ .set_packet_infos(packet_infos)
+ .set_video_frame_buffer(kFrameBuffer)
+ .build();
+ encode_timer.OnEncodeStarted(frame);
+ encode_timer.FillTimingInfo(0, &image);
+ EXPECT_EQ(image.PacketInfos().size(), 3U);
+}
+
+TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteBitstreamWithoutCodecInfo) {
+ uint8_t buffer[] = {1, 2, 3};
+ auto image_buffer = EncodedImageBuffer::Create(buffer, sizeof(buffer));
+ EncodedImage image;
+ image.SetEncodedData(image_buffer);
+
+ FakeEncodedImageCallback sink;
+ FrameEncodeMetadataWriter encode_metadata_writer(&sink);
+ encode_metadata_writer.UpdateBitstream(nullptr, &image);
+ EXPECT_EQ(image.GetEncodedData(), image_buffer);
+ EXPECT_EQ(image.size(), sizeof(buffer));
+}
+
+TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteVp8Bitstream) {
+ uint8_t buffer[] = {1, 2, 3};
+ auto image_buffer = EncodedImageBuffer::Create(buffer, sizeof(buffer));
+ EncodedImage image;
+ image.SetEncodedData(image_buffer);
+ CodecSpecificInfo codec_specific_info;
+ codec_specific_info.codecType = kVideoCodecVP8;
+
+ FakeEncodedImageCallback sink;
+ FrameEncodeMetadataWriter encode_metadata_writer(&sink);
+ encode_metadata_writer.UpdateBitstream(&codec_specific_info, &image);
+ EXPECT_EQ(image.GetEncodedData(), image_buffer);
+ EXPECT_EQ(image.size(), sizeof(buffer));
+}
+
+TEST(FrameEncodeMetadataWriterTest, RewritesH264BitstreamWithNonOptimalSps) {
+ const uint8_t kOriginalSps[] = {0, 0, 0, 1, H264::NaluType::kSps,
+ 0x00, 0x00, 0x03, 0x03, 0xF4,
+ 0x05, 0x03, 0xC7, 0xC0};
+ const uint8_t kRewrittenSps[] = {0, 0, 0, 1, H264::NaluType::kSps,
+ 0x00, 0x00, 0x03, 0x03, 0xF4,
+ 0x05, 0x03, 0xC7, 0xE0, 0x1B,
+ 0x41, 0x10, 0x8D, 0x00};
+
+ EncodedImage image;
+ image.SetEncodedData(
+ EncodedImageBuffer::Create(kOriginalSps, sizeof(kOriginalSps)));
+ image._frameType = VideoFrameType::kVideoFrameKey;
+
+ CodecSpecificInfo codec_specific_info;
+ codec_specific_info.codecType = kVideoCodecH264;
+
+ FakeEncodedImageCallback sink;
+ FrameEncodeMetadataWriter encode_metadata_writer(&sink);
+ encode_metadata_writer.UpdateBitstream(&codec_specific_info, &image);
+
+ EXPECT_THAT(std::vector<uint8_t>(image.data(), image.data() + image.size()),
+ testing::ElementsAreArray(kRewrittenSps));
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/full_stack_tests.cc b/third_party/libwebrtc/video/full_stack_tests.cc
new file mode 100644
index 0000000000..cddf98343d
--- /dev/null
+++ b/third_party/libwebrtc/video/full_stack_tests.cc
@@ -0,0 +1,1189 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/types/optional.h"
+#include "api/test/simulated_network.h"
+#include "api/test/test_dependency_factory.h"
+#include "api/test/video_quality_test_fixture.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+#include "video/config/video_encoder_config.h"
+#include "video/video_quality_test.h"
+
+ABSL_FLAG(std::string,
+ rtc_event_log_name,
+ "",
+ "Filename for rtc event log. Two files "
+ "with \"_send\" and \"_recv\" suffixes will be created.");
+ABSL_FLAG(std::string,
+ rtp_dump_name,
+ "",
+ "Filename for dumped received RTP stream.");
+ABSL_FLAG(std::string,
+ encoded_frame_path,
+ "",
+ "The base path for encoded frame logs. Created files will have "
+ "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
+
+namespace webrtc {
+
+namespace {
+static const int kFullStackTestDurationSecs = 45;
+
+struct ParamsWithLogging : public VideoQualityTest::Params {
+ public:
+ ParamsWithLogging() {
+ // Use these logging flags by default, for everything.
+ logging = {absl::GetFlag(FLAGS_rtc_event_log_name),
+ absl::GetFlag(FLAGS_rtp_dump_name),
+ absl::GetFlag(FLAGS_encoded_frame_path)};
+ this->config = BuiltInNetworkBehaviorConfig();
+ }
+};
+
+std::unique_ptr<VideoQualityTestFixtureInterface>
+CreateVideoQualityTestFixture() {
+ // The components will normally be nullptr (= use defaults), but it's possible
+ // for external test runners to override the list of injected components.
+ auto components = TestDependencyFactory::GetInstance().CreateComponents();
+ return std::make_unique<VideoQualityTest>(std::move(components));
+}
+
+// Takes the currently active field trial set and appends some new trials.
+std::string AppendFieldTrials(std::string new_trial_string) {
+ return std::string(field_trial::GetFieldTrialString()) + new_trial_string;
+}
+
+std::string ClipNameToClipPath(const char* clip_name) {
+ return test::ResourcePath(clip_name, "yuv");
+}
+} // namespace
+
+// VideoQualityTest::Params params = {
+// { ... }, // Common.
+// { ... }, // Video-specific settings.
+// { ... }, // Screenshare-specific settings.
+// { ... }, // Analyzer settings.
+// pipe, // FakeNetworkPipe::Config
+// { ... }, // Spatial scalability.
+// logs // bool
+// };
+
+#if defined(RTC_ENABLE_VP9)
+TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_VP9) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 700000, 700000, 700000, false,
+ "VP9", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_VP9", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(GenericDescriptorTest,
+ Foreman_Cif_Delay_50_0_Plr_5_VP9_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP9", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_VP9_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Generator_Net_Delay_0_0_Plr_0_VP9Profile2) {
+ // Profile 2 might not be available on some platforms until
+ // https://bugs.chromium.org/p/webm/issues/detail?id=1544 is solved.
+ bool profile_2_is_supported = false;
+ for (const auto& codec : SupportedVP9Codecs()) {
+ if (ParseSdpForVP9Profile(codec.parameters)
+ .value_or(VP9Profile::kProfile0) == VP9Profile::kProfile2) {
+ profile_2_is_supported = true;
+ }
+ }
+ if (!profile_2_is_supported)
+ return;
+ auto fixture = CreateVideoQualityTestFixture();
+
+ SdpVideoFormat::Parameters vp92 = {
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile2)}};
+ ParamsWithLogging generator;
+ generator.call.send_side_bwe = true;
+ generator.video[0] = {
+ true, 352, 288, 30, 700000, 700000, 700000, false, "VP9",
+ 1, 0, 0, false, false, true, "GeneratorI010", 0, vp92};
+ generator.analyzer = {"generator_net_delay_0_0_plr_0_VP9Profile2", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(generator);
+}
+
+TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_Multiplex) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 700000, 700000, 700000, false,
+ "multiplex", 1, 0, 0,
+ false, false, false, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Generator_Net_Delay_0_0_Plr_0_Multiplex) {
+ auto fixture = CreateVideoQualityTestFixture();
+
+ ParamsWithLogging generator;
+ generator.call.send_side_bwe = true;
+ generator.video[0] = {
+ true, 352, 288, 30, 700000, 700000, 700000, false,
+ "multiplex", 1, 0, 0, false, false, false, "GeneratorI420A"};
+ generator.analyzer = {"generator_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(generator);
+}
+
+#endif // defined(RTC_ENABLE_VP9)
+
+#if defined(WEBRTC_LINUX)
+// Crashes on the linux trusty perf bot: bugs.webrtc.org/9129.
+#define MAYBE_Net_Delay_0_0_Plr_0 DISABLED_Net_Delay_0_0_Plr_0
+#else
+#define MAYBE_Net_Delay_0_0_Plr_0 Net_Delay_0_0_Plr_0
+#endif
+TEST(FullStackTest, MAYBE_Net_Delay_0_0_Plr_0) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging paris_qcif;
+ paris_qcif.call.send_side_bwe = true;
+ paris_qcif.video[0] = {
+ true, 176, 144, 30,
+ 300000, 300000, 300000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("paris_qcif")};
+ paris_qcif.analyzer = {"net_delay_0_0_plr_0", 36.0, 0.96,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(paris_qcif);
+}
+
+TEST(GenericDescriptorTest,
+ Foreman_Cif_Net_Delay_0_0_Plr_0_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif.
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 700000, 700000, 700000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(GenericDescriptorTest,
+ Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 10,
+ 30000, 30000, 30000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {
+ "foreman_cif_30kbps_net_delay_0_0_plr_0_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+// Link capacity below default start rate.
+TEST(FullStackTest, Foreman_Cif_Link_150kbps_Net_Delay_0_0_Plr_0) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_link_150kbps_net_delay_0_0_plr_0", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->link_capacity_kbps = 150;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+// Restricted network and encoder overproducing by 30%.
+TEST(FullStackTest,
+ Foreman_Cif_Link_150kbps_Delay100ms_30pkts_Queue_Overshoot30) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif"),
+ 0, {}, 1.30};
+ foreman_cif.analyzer = {
+ "foreman_cif_link_150kbps_delay100ms_30pkts_queue_overshoot30", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->link_capacity_kbps = 150;
+ foreman_cif.config->queue_length_packets = 30;
+ foreman_cif.config->queue_delay_ms = 100;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+// Weak 3G-style link: 250kbps, 1% loss, 100ms delay, 10 packets queue.
+// Packet rate and loss are low enough that losses occur at roughly 3s
+// intervals. This causes the protection overhead to toggle between zero and
+// non-zero. The link queue is restrictive enough to trigger loss on probes.
+TEST(FullStackTest, Foreman_Cif_Link_250kbps_Delay100ms_10pkts_Loss1) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif"),
+ 0, {}, 1.30};
+ foreman_cif.analyzer = {"foreman_cif_link_250kbps_delay100ms_10pkts_loss1",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->link_capacity_kbps = 250;
+ foreman_cif.config->queue_length_packets = 10;
+ foreman_cif.config->queue_delay_ms = 100;
+ foreman_cif.config->loss_percent = 1;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(GenericDescriptorTest, Foreman_Cif_Delay_50_0_Plr_5_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(GenericDescriptorTest,
+ Foreman_Cif_Delay_50_0_Plr_5_Ulpfec_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ true, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {
+ "foreman_cif_delay_50_0_plr_5_ulpfec_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_Delay_50_0_Plr_5_Flexfec) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, true, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_flexfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_500kbps_Delay_50_0_Plr_3_Flexfec) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, true, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_delay_50_0_plr_3_flexfec", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 3;
+ foreman_cif.config->link_capacity_kbps = 500;
+ foreman_cif.config->queue_delay_ms = 50;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_500kbps_Delay_50_0_Plr_3_Ulpfec) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ true, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_delay_50_0_plr_3_ulpfec", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 3;
+ foreman_cif.config->link_capacity_kbps = 500;
+ foreman_cif.config->queue_delay_ms = 50;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+#if defined(WEBRTC_USE_H264)
+TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_H264) {
+ auto fixture = CreateVideoQualityTestFixture();
+ // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif.
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 700000, 700000, 700000, false,
+ "H264", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_H264", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_H264) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 10,
+ 30000, 30000, 30000, false,
+ "H264", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_30kbps_net_delay_0_0_plr_0_H264", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(GenericDescriptorTest,
+ Foreman_Cif_Delay_50_0_Plr_5_H264_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {
+ "foreman_cif_delay_50_0_plr_5_H264_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_Delay_50_0_Plr_5_H264_Sps_Pps_Idr) {
+ test::ScopedFieldTrials override_field_trials(
+ AppendFieldTrials("WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_sps_pps_idr", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+// Verify that this is worth the bot time, before enabling.
+TEST(FullStackTest, Foreman_Cif_Delay_50_0_Plr_5_H264_Flexfec) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0,
+ false, true, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_flexfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+// Ulpfec with H264 is an unsupported combination, so this test is only useful
+// for debugging. It is therefore disabled by default.
+TEST(FullStackTest, DISABLED_Foreman_Cif_Delay_50_0_Plr_5_H264_Ulpfec) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0,
+ true, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_ulpfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->loss_percent = 5;
+ foreman_cif.config->queue_delay_ms = 50;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+#endif // defined(WEBRTC_USE_H264)
+
+TEST(FullStackTest, Foreman_Cif_500kbps) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->queue_length_packets = 0;
+ foreman_cif.config->queue_delay_ms = 0;
+ foreman_cif.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_500kbps_32pkts_Queue) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_32pkts_queue", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->queue_length_packets = 32;
+ foreman_cif.config->queue_delay_ms = 0;
+ foreman_cif.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_500kbps_100ms) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_100ms", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->queue_length_packets = 0;
+ foreman_cif.config->queue_delay_ms = 100;
+ foreman_cif.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(GenericDescriptorTest,
+ Foreman_Cif_500kbps_100ms_32pkts_Queue_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {
+ "foreman_cif_500kbps_100ms_32pkts_queue_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->queue_length_packets = 32;
+ foreman_cif.config->queue_delay_ms = 100;
+ foreman_cif.config->link_capacity_kbps = 500;
+ foreman_cif.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_500kbps_100ms_32pkts_Queue_Recv_Bwe) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = false;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_100ms_32pkts_queue_recv_bwe",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.config->queue_length_packets = 32;
+ foreman_cif.config->queue_delay_ms = 100;
+ foreman_cif.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(FullStackTest, Foreman_Cif_1000kbps_100ms_32pkts_Queue) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 2000000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_1000kbps_100ms_32pkts_queue", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.config->queue_length_packets = 32;
+ foreman_cif.config->queue_delay_ms = 100;
+ foreman_cif.config->link_capacity_kbps = 1000;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+// TODO(sprang): Remove this if we have the similar ModerateLimits tests below?
+TEST(FullStackTest, Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 1,
+ 0, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_2000kbps_100ms_32pkts_queue",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 32;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+TEST(GenericDescriptorTest,
+ Conference_Motion_Hd_2tl_Moderate_Limits_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 2,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {
+ "conference_motion_hd_2tl_moderate_limits_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ conf_motion_hd.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+TEST(FullStackTest, Conference_Motion_Hd_3tl_Moderate_Limits) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 3,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_3tl_moderate_limits", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+TEST(FullStackTest, Conference_Motion_Hd_4tl_Moderate_Limits) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 4,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_4tl_moderate_limits", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+TEST(FullStackTest, Conference_Motion_Hd_3tl_Alt_Moderate_Limits) {
+ test::ScopedFieldTrials field_trial(
+ AppendFieldTrials("WebRTC-UseShortVP8TL3Pattern/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 3,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_3tl_alt_moderate_limits",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+TEST(FullStackTest, Conference_Motion_Hd_3tl_Alt_Heavy_Moderate_Limits) {
+ auto fixture = CreateVideoQualityTestFixture();
+ test::ScopedFieldTrials field_trial(
+ AppendFieldTrials("WebRTC-UseShortVP8TL3Pattern/Enabled/"
+ "WebRTC-UseBaseHeavyVP8TL3RateAllocation/Enabled/"));
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 3,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {
+ "conference_motion_hd_3tl_alt_heavy_moderate_limits", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+#if defined(RTC_ENABLE_VP9)
+TEST(FullStackTest, Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue_Vp9) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP9", 1,
+ 0, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {
+ "conference_motion_hd_2000kbps_100ms_32pkts_queue_vp9", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ conf_motion_hd.config->queue_length_packets = 32;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+#endif
+
+TEST(FullStackTest, Screenshare_Slides) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+#if !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN)
+// TODO(bugs.webrtc.org/9840): Investigate why this test is flaky on Win/Mac.
+TEST(FullStackTest, Screenshare_Slides_Simulcast) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.video[0] = {true, 1850, 1110, 30, 800000, 2500000,
+ 2500000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.analyzer = {"screenshare_slides_simulcast", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ ParamsWithLogging screenshare_params_high;
+ screenshare_params_high.video[0] = {
+ true, 1850, 1110, 60, 600000, 1250000, 1250000, false,
+ "VP8", 2, 0, 400000, false, false, false, ""};
+ VideoQualityTest::Params screenshare_params_low;
+ screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000,
+ 1000000, false, "VP8", 2, 0, 400000,
+ false, false, false, ""};
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)};
+ screenshare.ss[0] = {
+ streams, 1, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+#endif // !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN)
+
+TEST(FullStackTest, Screenshare_Slides_Scrolling) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging config;
+ config.call.send_side_bwe = true;
+ config.video[0] = {true, 1850, 1110 / 2, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ config.screenshare[0] = {true, false, 10, 2};
+ config.analyzer = {"screenshare_slides_scrolling", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(config);
+}
+
+TEST(GenericDescriptorTest, Screenshare_Slides_Lossy_Net_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_net_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ screenshare.config->loss_percent = 5;
+ screenshare.config->queue_delay_ms = 200;
+ screenshare.config->link_capacity_kbps = 500;
+ screenshare.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+TEST(FullStackTest, Screenshare_Slides_Very_Lossy) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_very_lossy", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.config->loss_percent = 10;
+ screenshare.config->queue_delay_ms = 200;
+ screenshare.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+TEST(FullStackTest, Screenshare_Slides_Lossy_Limited) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_limited", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.config->loss_percent = 5;
+ screenshare.config->link_capacity_kbps = 200;
+ screenshare.config->queue_length_packets = 30;
+
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+TEST(FullStackTest, Screenshare_Slides_Moderately_Restricted) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_moderately_restricted", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.config->loss_percent = 1;
+ screenshare.config->link_capacity_kbps = 1200;
+ screenshare.config->queue_length_packets = 30;
+
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+// Since ParamsWithLogging::Video is not trivially destructible, we can't
+// store these structs as const globals.
+ParamsWithLogging::Video SvcVp9Video() {
+ return ParamsWithLogging::Video{
+ true, 1280,
+ 720, 30,
+ 800000, 2500000,
+ 2500000, false,
+ "VP9", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+ParamsWithLogging::Video SimulcastVp8VideoHigh() {
+ return ParamsWithLogging::Video{
+ true, 1280,
+ 720, 30,
+ 800000, 2500000,
+ 2500000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+ParamsWithLogging::Video SimulcastVp8VideoMedium() {
+ return ParamsWithLogging::Video{
+ true, 640,
+ 360, 30,
+ 150000, 500000,
+ 700000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+ParamsWithLogging::Video SimulcastVp8VideoLow() {
+ return ParamsWithLogging::Video{
+ true, 320,
+ 180, 30,
+ 30000, 150000,
+ 200000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+#if defined(RTC_ENABLE_VP9)
+
+TEST(FullStackTest, Screenshare_Slides_Vp9_3sl_High_Fps) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 30, 50000, 200000,
+ 2000000, false, "VP9", 1, 0, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_vp9_3sl_high_fps", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 2, InterLayerPredMode::kOn,
+ std::vector<SpatialLayer>(), true};
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+// TODO(http://bugs.webrtc.org/9506): investigate.
+#if !defined(WEBRTC_MAC)
+
+TEST(FullStackTest, Vp9ksvc_3sl_High) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_high", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 2, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+TEST(FullStackTest, Vp9ksvc_3sl_Low) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_low", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 0, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+TEST(FullStackTest, Vp9ksvc_3sl_Low_Bw_Limited) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"
+ "WebRTC-Vp9ExternalRefCtrl/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.config->link_capacity_kbps = 500;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_low_bw_limited", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 0, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+TEST(FullStackTest, Vp9ksvc_3sl_Medium_Network_Restricted) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_medium_network_restricted", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, -1, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ simulcast.config->link_capacity_kbps = 1000;
+ simulcast.config->queue_delay_ms = 100;
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+// TODO(webrtc:9722): Remove when experiment is cleaned up.
+TEST(FullStackTest, Vp9ksvc_3sl_Medium_Network_Restricted_Trusted_Rate) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_medium_network_restricted_trusted_rate",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, -1, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ simulcast.config->link_capacity_kbps = 1000;
+ simulcast.config->queue_delay_ms = 100;
+ fixture->RunWithAnalyzer(simulcast);
+}
+#endif // !defined(WEBRTC_MAC)
+
+#endif // defined(RTC_ENABLE_VP9)
+
+// Android bots can't handle FullHD, so disable the test.
+// TODO(bugs.webrtc.org/9220): Investigate source of flakiness on Mac.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC)
+#define MAYBE_Simulcast_HD_High DISABLED_Simulcast_HD_High
+#else
+#define MAYBE_Simulcast_HD_High Simulcast_HD_High
+#endif
+
+TEST(FullStackTest, MAYBE_Simulcast_HD_High) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = {true, 1920, 1080, 30, 800000, 2500000,
+ 2500000, false, "VP8", 3, 2, 400000,
+ false, false, false, "Generator"};
+ simulcast.analyzer = {"simulcast_HD_high", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.config->loss_percent = 0;
+ simulcast.config->queue_delay_ms = 100;
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(simulcast, 0),
+ VideoQualityTest::DefaultVideoStream(simulcast, 0),
+ VideoQualityTest::DefaultVideoStream(simulcast, 0)};
+ simulcast.ss[0] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ true};
+ webrtc::test::ScopedFieldTrials override_trials(AppendFieldTrials(
+ "WebRTC-ForceSimulatedOveruseIntervalMs/1000-50000-300/"));
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+TEST(FullStackTest, Simulcast_Vp8_3sl_High) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SimulcastVp8VideoHigh();
+ simulcast.analyzer = {"simulcast_vp8_3sl_high", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.config->loss_percent = 0;
+ simulcast.config->queue_delay_ms = 100;
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+ simulcast.ss[0] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+TEST(FullStackTest, Simulcast_Vp8_3sl_Low) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SimulcastVp8VideoHigh();
+ simulcast.analyzer = {"simulcast_vp8_3sl_low", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.config->loss_percent = 0;
+ simulcast.config->queue_delay_ms = 100;
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+ simulcast.ss[0] = {
+ streams, 0, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+// This test assumes ideal network conditions with target bandwidth being
+// available and exercises WebRTC calls with a high target bitrate (100 Mbps).
+// Android32 bots can't handle this high bitrate, so the test is disabled for
+// them.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_High_Bitrate_With_Fake_Codec DISABLED_High_Bitrate_With_Fake_Codec
+#else
+#define MAYBE_High_Bitrate_With_Fake_Codec High_Bitrate_With_Fake_Codec
+#endif // defined(WEBRTC_ANDROID)
+TEST(FullStackTest, MAYBE_High_Bitrate_With_Fake_Codec) {
+ auto fixture = CreateVideoQualityTestFixture();
+ const int target_bitrate = 100000000;
+ ParamsWithLogging generator;
+ generator.call.send_side_bwe = true;
+ generator.call.call_bitrate_config.min_bitrate_bps = target_bitrate;
+ generator.call.call_bitrate_config.start_bitrate_bps = target_bitrate;
+ generator.call.call_bitrate_config.max_bitrate_bps = target_bitrate;
+ generator.video[0] = {true,
+ 360,
+ 240,
+ 30,
+ target_bitrate / 2,
+ target_bitrate,
+ target_bitrate * 2,
+ false,
+ "FakeCodec",
+ 1,
+ 0,
+ 0,
+ false,
+ false,
+ false,
+ "Generator"};
+ generator.analyzer = {"high_bitrate_with_fake_codec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ fixture->RunWithAnalyzer(generator);
+}
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+// Fails on mobile devices:
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=7301
+#define MAYBE_Largeroom_50thumb DISABLED_Largeroom_50thumb
+#else
+#define MAYBE_Largeroom_50thumb Largeroom_50thumb
+#endif
+
+TEST(FullStackTest, MAYBE_Largeroom_50thumb) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video[0] = SimulcastVp8VideoHigh();
+ large_room.analyzer = {"largeroom_50thumb", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ large_room.config->loss_percent = 0;
+ large_room.config->queue_delay_ms = 100;
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+ large_room.call.num_thumbnails = 50;
+ large_room.ss[0] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(large_room);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/full_stack_tests_plot.py b/third_party/libwebrtc/video/full_stack_tests_plot.py
new file mode 100755
index 0000000000..c195b72a54
--- /dev/null
+++ b/third_party/libwebrtc/video/full_stack_tests_plot.py
@@ -0,0 +1,469 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+"""Generate graphs for data generated by loopback tests.
+
+Usage examples:
+ Show end to end time for a single full stack test.
+ ./full_stack_tests_plot.py -df end_to_end -o 600 --frames 1000 vp9_data.txt
+
+  Show PSNR and encoded frame size simultaneously for two different runs of a
+  full stack test, averaged over a cycle of 200 frames. Used e.g. for the
+  screenshare slide test.
+ ./full_stack_tests_plot.py -c 200 -df psnr -drf encoded_frame_size \\
+ before.txt after.txt
+
+ Similar to the previous test, but multiple graphs.
+ ./full_stack_tests_plot.py -c 200 -df psnr vp8.txt vp9.txt --next \\
+ -c 200 -df sender_time vp8.txt vp9.txt --next \\
+ -c 200 -df end_to_end vp8.txt vp9.txt
+"""
+
+import argparse
+from collections import defaultdict
+import itertools
+import sys
+import matplotlib.pyplot as plt
+import numpy
+
+# Fields
+DROPPED = 0
+INPUT_TIME = 1 # ms (timestamp)
+SEND_TIME = 2 # ms (timestamp)
+RECV_TIME = 3 # ms (timestamp)
+RENDER_TIME = 4 # ms (timestamp)
+ENCODED_FRAME_SIZE = 5 # bytes
+PSNR = 6
+SSIM = 7
+ENCODE_TIME = 8 # ms (time interval)
+
+TOTAL_RAW_FIELDS = 9
+
+SENDER_TIME = TOTAL_RAW_FIELDS + 0
+RECEIVER_TIME = TOTAL_RAW_FIELDS + 1
+END_TO_END = TOTAL_RAW_FIELDS + 2
+RENDERED_DELTA = TOTAL_RAW_FIELDS + 3
+
+FIELD_MASK = 255
+
+# Options
+HIDE_DROPPED = 256
+RIGHT_Y_AXIS = 512
+
+# internal field id, field name, title
+_FIELDS = [
+ # Raw
+ (DROPPED, "dropped", "dropped"),
+ (INPUT_TIME, "input_time_ms", "input time"),
+ (SEND_TIME, "send_time_ms", "send time"),
+ (RECV_TIME, "recv_time_ms", "recv time"),
+ (ENCODED_FRAME_SIZE, "encoded_frame_size", "encoded frame size"),
+ (PSNR, "psnr", "PSNR"),
+ (SSIM, "ssim", "SSIM"),
+ (RENDER_TIME, "render_time_ms", "render time"),
+ (ENCODE_TIME, "encode_time_ms", "encode time"),
+ # Auto-generated
+ (SENDER_TIME, "sender_time", "sender time"),
+ (RECEIVER_TIME, "receiver_time", "receiver time"),
+ (END_TO_END, "end_to_end", "end to end"),
+ (RENDERED_DELTA, "rendered_delta", "rendered delta"),
+]
+
+NAME_TO_ID = {field[1]: field[0] for field in _FIELDS}
+ID_TO_TITLE = {field[0]: field[2] for field in _FIELDS}
+
+
+def FieldArgToId(arg):
+ if arg == "none":
+ return None
+ if arg in NAME_TO_ID:
+ return NAME_TO_ID[arg]
+ if arg + "_ms" in NAME_TO_ID:
+ return NAME_TO_ID[arg + "_ms"]
+ raise Exception("Unrecognized field name \"{}\"".format(arg))
+
+
+class PlotLine(object):
+ """Data for a single graph line."""
+
+ def __init__(self, label, values, flags):
+ self.label = label
+ self.values = values
+ self.flags = flags
+
+
+class Data(object):
+ """Object representing one full stack test."""
+
+ def __init__(self, filename):
+ self.title = ""
+ self.length = 0
+ self.samples = defaultdict(list)
+
+ self._ReadSamples(filename)
+
+ def _ReadSamples(self, filename):
+ """Reads graph data from the given file."""
+ f = open(filename)
+ it = iter(f)
+
+ self.title = it.next().strip()
+ self.length = int(it.next())
+ field_names = [name.strip() for name in it.next().split()]
+ field_ids = [NAME_TO_ID[name] for name in field_names]
+
+ for field_id in field_ids:
+ self.samples[field_id] = [0.0] * self.length
+
+ for sample_id in xrange(self.length):
+ for col, value in enumerate(it.next().split()):
+ self.samples[field_ids[col]][sample_id] = float(value)
+
+ self._SubtractFirstInputTime()
+ self._GenerateAdditionalData()
+
+ f.close()
+
+ def _SubtractFirstInputTime(self):
+ offset = self.samples[INPUT_TIME][0]
+ for field in [INPUT_TIME, SEND_TIME, RECV_TIME, RENDER_TIME]:
+ if field in self.samples:
+ self.samples[field] = [x - offset for x in self.samples[field]]
+
+ def _GenerateAdditionalData(self):
+ """Calculates sender time, receiver time etc. from the raw data."""
+ s = self.samples
+ last_render_time = 0
+ for field_id in [
+ SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA
+ ]:
+ s[field_id] = [0] * self.length
+
+ for k in range(self.length):
+ s[SENDER_TIME][k] = s[SEND_TIME][k] - s[INPUT_TIME][k]
+
+ decoded_time = s[RENDER_TIME][k]
+ s[RECEIVER_TIME][k] = decoded_time - s[RECV_TIME][k]
+ s[END_TO_END][k] = decoded_time - s[INPUT_TIME][k]
+ if not s[DROPPED][k]:
+ if k > 0:
+ s[RENDERED_DELTA][k] = decoded_time - last_render_time
+ last_render_time = decoded_time
+
+ def _Hide(self, values):
+ """
+ Replaces values for dropped frames with None.
+ These values are then skipped by the Plot() method.
+ """
+
+ return [
+ None if self.samples[DROPPED][k] else values[k]
+ for k in range(len(values))
+ ]
+
+ def AddSamples(self, config, target_lines_list):
+ """Creates graph lines from the current data set with given config."""
+ for field in config.fields:
+            # A field of None means the user just wants to skip the color.
+ if field is None:
+ target_lines_list.append(None)
+ continue
+
+ field_id = field & FIELD_MASK
+ values = self.samples[field_id]
+
+ if field & HIDE_DROPPED:
+ values = self._Hide(values)
+
+ target_lines_list.append(
+ PlotLine(self.title + " " + ID_TO_TITLE[field_id], values,
+ field & ~FIELD_MASK))
+
+
+def AverageOverCycle(values, length):
+ """
+ Returns the list:
+ [
+ avg(values[0], values[length], ...),
+ avg(values[1], values[length + 1], ...),
+ ...
+ avg(values[length - 1], values[2 * length - 1], ...),
+ ]
+
+ Skips None values when calculating the average value.
+ """
+
+ total = [0.0] * length
+ count = [0] * length
+ for k, val in enumerate(values):
+ if val is not None:
+ total[k % length] += val
+ count[k % length] += 1
+
+ result = [0.0] * length
+ for k in range(length):
+ result[k] = total[k] / count[k] if count[k] else None
+ return result
+
+
+class PlotConfig(object):
+ """Object representing a single graph."""
+
+ def __init__(self,
+ fields,
+ data_list,
+ cycle_length=None,
+ frames=None,
+ offset=0,
+ output_filename=None,
+ title="Graph"):
+ self.fields = fields
+ self.data_list = data_list
+ self.cycle_length = cycle_length
+ self.frames = frames
+ self.offset = offset
+ self.output_filename = output_filename
+ self.title = title
+
+ def Plot(self, ax1):
+ lines = []
+ for data in self.data_list:
+ if not data:
+ # Add None lines to skip the colors.
+ lines.extend([None] * len(self.fields))
+ else:
+ data.AddSamples(self, lines)
+
+ def _SliceValues(values):
+ if self.offset:
+ values = values[self.offset:]
+ if self.frames:
+ values = values[:self.frames]
+ return values
+
+ length = None
+ for line in lines:
+ if line is None:
+ continue
+
+ line.values = _SliceValues(line.values)
+ if self.cycle_length:
+ line.values = AverageOverCycle(line.values, self.cycle_length)
+
+ if length is None:
+ length = len(line.values)
+ elif length != len(line.values):
+ raise Exception("All arrays should have the same length!")
+
+ ax1.set_xlabel("Frame", fontsize="large")
+ if any(line.flags & RIGHT_Y_AXIS for line in lines if line):
+ ax2 = ax1.twinx()
+ ax2.set_xlabel("Frame", fontsize="large")
+ else:
+ ax2 = None
+
+ # Have to implement color_cycle manually, due to two scales in a graph.
+ color_cycle = ["b", "r", "g", "c", "m", "y", "k"]
+ color_iter = itertools.cycle(color_cycle)
+
+ for line in lines:
+ if not line:
+ color_iter.next()
+ continue
+
+ if self.cycle_length:
+ x = numpy.array(range(self.cycle_length))
+ else:
+ x = numpy.array(
+ range(self.offset, self.offset + len(line.values)))
+ y = numpy.array(line.values)
+ ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1
+            ax.plot(x,
+ y,
+ "o-",
+ label=line.label,
+ markersize=3.0,
+ linewidth=1.0,
+ color=color_iter.next())
+
+ ax1.grid(True)
+ if ax2:
+ ax1.legend(loc="upper left", shadow=True, fontsize="large")
+ ax2.legend(loc="upper right", shadow=True, fontsize="large")
+ else:
+ ax1.legend(loc="best", shadow=True, fontsize="large")
+
+
+def LoadFiles(filenames):
+ result = []
+ for filename in filenames:
+ if filename in LoadFiles.cache:
+ result.append(LoadFiles.cache[filename])
+ else:
+ data = Data(filename)
+ LoadFiles.cache[filename] = data
+ result.append(data)
+ return result
+
+
+LoadFiles.cache = {}
+
+
+def GetParser():
+ class CustomAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ if "ordered_args" not in namespace:
+ namespace.ordered_args = []
+ namespace.ordered_args.append((self.dest, values))
+
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ parser.add_argument("-c",
+ "--cycle_length",
+ nargs=1,
+ action=CustomAction,
+ type=int,
+ help="Cycle length over which to average the values.")
+ parser.add_argument(
+ "-f",
+ "--field",
+ nargs=1,
+ action=CustomAction,
+ help="Name of the field to show. Use 'none' to skip a color.")
+ parser.add_argument("-r",
+ "--right",
+ nargs=0,
+ action=CustomAction,
+ help="Use right Y axis for given field.")
+ parser.add_argument("-d",
+ "--drop",
+ nargs=0,
+ action=CustomAction,
+ help="Hide values for dropped frames.")
+ parser.add_argument("-o",
+ "--offset",
+ nargs=1,
+ action=CustomAction,
+ type=int,
+ help="Frame offset.")
+ parser.add_argument("-n",
+ "--next",
+ nargs=0,
+ action=CustomAction,
+ help="Separator for multiple graphs.")
+ parser.add_argument(
+ "--frames",
+ nargs=1,
+ action=CustomAction,
+ type=int,
+ help="Frame count to show or take into account while averaging.")
+ parser.add_argument("-t",
+ "--title",
+ nargs=1,
+ action=CustomAction,
+ help="Title of the graph.")
+ parser.add_argument("-O",
+ "--output_filename",
+ nargs=1,
+ action=CustomAction,
+ help="Use to save the graph into a file. "
+ "Otherwise, a window will be shown.")
+ parser.add_argument(
+ "files",
+ nargs="+",
+ action=CustomAction,
+ help="List of text-based files generated by loopback tests.")
+ return parser
+
+
+def _PlotConfigFromArgs(args, graph_num):
+ # Pylint complains about using kwargs, so have to do it this way.
+ cycle_length = None
+ frames = None
+ offset = 0
+ output_filename = None
+ title = "Graph"
+
+ fields = []
+ files = []
+ mask = 0
+ for key, values in args:
+ if key == "cycle_length":
+ cycle_length = values[0]
+ elif key == "frames":
+ frames = values[0]
+ elif key == "offset":
+ offset = values[0]
+ elif key == "output_filename":
+ output_filename = values[0]
+ elif key == "title":
+ title = values[0]
+ elif key == "drop":
+ mask |= HIDE_DROPPED
+ elif key == "right":
+ mask |= RIGHT_Y_AXIS
+ elif key == "field":
+ field_id = FieldArgToId(values[0])
+ fields.append(field_id | mask if field_id is not None else None)
+ mask = 0 # Reset mask after the field argument.
+ elif key == "files":
+ files.extend(values)
+
+ if not files:
+ raise Exception(
+ "Missing file argument(s) for graph #{}".format(graph_num))
+ if not fields:
+ raise Exception(
+ "Missing field argument(s) for graph #{}".format(graph_num))
+
+ return PlotConfig(fields,
+ LoadFiles(files),
+ cycle_length=cycle_length,
+ frames=frames,
+ offset=offset,
+ output_filename=output_filename,
+ title=title)
+
+
+def PlotConfigsFromArgs(args):
+ """Generates plot configs for given command line arguments."""
+ # The way it works:
+ # First we detect separators -n/--next and split arguments into groups, one
+ # for each plot. For each group, we partially parse it with
+ # argparse.ArgumentParser, modified to remember the order of arguments.
+ # Then we traverse the argument list and fill the PlotConfig.
+ args = itertools.groupby(args, lambda x: x in ["-n", "--next"])
+ prep_args = list(list(group) for match, group in args if not match)
+
+ parser = GetParser()
+ plot_configs = []
+ for index, raw_args in enumerate(prep_args):
+ graph_args = parser.parse_args(raw_args).ordered_args
+ plot_configs.append(_PlotConfigFromArgs(graph_args, index))
+ return plot_configs
+
+
+def ShowOrSavePlots(plot_configs):
+ for config in plot_configs:
+ fig = plt.figure(figsize=(14.0, 10.0))
+        ax = fig.add_subplot(1, 1, 1)
+
+ plt.title(config.title)
+ config.Plot(ax)
+ if config.output_filename:
+ print "Saving to", config.output_filename
+ fig.savefig(config.output_filename)
+ plt.close(fig)
+
+ plt.show()
+
+
+if __name__ == "__main__":
+ ShowOrSavePlots(PlotConfigsFromArgs(sys.argv[1:]))
diff --git a/third_party/libwebrtc/video/g3doc/adaptation.md b/third_party/libwebrtc/video/g3doc/adaptation.md
new file mode 100644
index 0000000000..cb06e886b2
--- /dev/null
+++ b/third_party/libwebrtc/video/g3doc/adaptation.md
@@ -0,0 +1,114 @@
+<!-- go/cmark -->
+<!--* freshness: {owner: 'eshr' reviewed: '2021-04-13'} *-->
+
+# Video Adaptation
+
+Video adaptation is a mechanism that reduces bandwidth or CPU consumption by
+reducing the encoded video quality.
+
+## Overview
+
+Adaptation occurs when a _Resource_ signals that it is currently underused or
+overused. When overused, the video quality is decreased; when underused, it is
+increased. There are currently two dimensions in which the quality can be
+adapted: frame-rate and resolution. Which dimension is adapted is determined
+by the degradation preference for the video track.
+
+## Resources
+
+_Resources_ monitor metrics from the system or the video stream. For example, a
+resource could monitor system temperature or the bandwidth usage of the video
+stream. A resource implements the [Resource][resource.h] interface. When a
+resource detects that it is overused, it calls `SetUsageState(kOveruse)`. When
+the resource is no longer overused, it can signal this using
+`SetUsageState(kUnderuse)`.
+
+There are two resources that are used by default on all video tracks: the
+quality scaler resource and the encode usage resource.
+
+### QP Scaler Resource
+
+The quality scaler resource monitors the quantization parameter (QP) of the
+encoded video frames for a video send stream and ensures that the quality of
+the stream is acceptable for the current resolution. After each frame is
+encoded, the
+[QualityScaler][quality_scaler.h] is given the QP of the encoded frame. Overuse
+or underuse is signalled when the average QP is outside of the
+[QP thresholds][VideoEncoder::QpThresholds]. If the average QP is above the
+_high_ threshold, the QP scaler signals _overuse_, and when below the _low_
+threshold the QP scaler signals _underuse_.
+
+The thresholds are set by the video encoder in the `scaling_settings` property
+of the [EncoderInfo][EncoderInfo].
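+
+For illustration, here is a minimal sketch of how an encoder implementation
+might advertise these thresholds. The class name and the threshold values are
+made up, and the remaining pure-virtual `VideoEncoder` methods are elided:
+
+```c++
+#include "api/video_codecs/video_encoder.h"
+
+class SketchEncoder : public webrtc::VideoEncoder {
+ public:
+  EncoderInfo GetEncoderInfo() const override {
+    EncoderInfo info;
+    // Example thresholds: average QP below 24 signals underuse (scale up),
+    // average QP above 37 signals overuse (scale down).
+    info.scaling_settings = ScalingSettings(/*low=*/24, /*high=*/37);
+    return info;
+  }
+  // ... InitEncode(), Encode(), etc. elided ...
+};
+```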
+
+*Note:* the QP scaler is only enabled when the degradation preference is
+`MAINTAIN_FRAMERATE` or `BALANCED`.
+
+### Encode Usage Resource
+
+The [encoder usage resource][encode_usage_resource.h] monitors how long it takes
+to encode a video frame. Encode time is a good proxy for CPU usage: when CPU
+usage is high, contention increases and encoding the video frames takes
+longer.
+
+The time is tracked from when frame encoding starts to when it is completed. If
+the average encoder usage exceeds the thresholds set, *overuse* is triggered.
+
+### Injecting other Resources
+
+A custom resource can be injected into the call using the
+[Call::AddAdaptationResource][Call::AddAdaptationResource] method.
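+
+As a sketch (the class and the temperature probe below are illustrative, not
+part of the API), a custom resource implements the [Resource][resource.h]
+interface and reports measurements to its listener:
+
+```c++
+#include "api/adaptation/resource.h"
+
+class TemperatureResource : public webrtc::Resource {
+ public:
+  std::string Name() const override { return "TemperatureResource"; }
+  void SetResourceListener(webrtc::ResourceListener* listener) override {
+    listener_ = listener;
+  }
+  // Called by some external probe; reports overuse when running hot.
+  void OnTemperature(double celsius) {
+    if (!listener_)
+      return;
+    listener_->OnResourceUsageStateMeasurement(
+        rtc::scoped_refptr<webrtc::Resource>(this),
+        celsius > 80.0 ? webrtc::ResourceUsageState::kOveruse
+                       : webrtc::ResourceUsageState::kUnderuse);
+  }
+
+ private:
+  webrtc::ResourceListener* listener_ = nullptr;
+};
+
+// Injection into an existing webrtc::Call:
+// call->AddAdaptationResource(rtc::make_ref_counted<TemperatureResource>());
+```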
+
+## Adaptation
+
+When a *resource* signals that it is over- or underused, the signal reaches
+the `ResourceAdaptationProcessor`, which requests an `Adaptation` proposal
+from the
+[VideoStreamAdapter][VideoStreamAdapter]. This proposal is based on the
+degradation preference of the video stream. `ResourceAdaptationProcessor` will
+determine if the `Adaptation` should be applied based on the current adaptation
+status and the `Adaptation` proposal.
+
+### Degradation Preference
+
+There are three degradation preferences, described in the
+[RtpParameters][RtpParameters] header. These are:
+
+* `MAINTAIN_FRAMERATE`: Adapt video resolution.
+* `MAINTAIN_RESOLUTION`: Adapt video frame-rate.
+* `BALANCED`: Adapt video frame-rate or resolution.
+
+The degradation preference is set for a video track using the
+`degradation_preference` property in the [RtpParameters][RtpParameters].
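+
+For example (a sketch; error handling is elided), an application that wants
+to keep the frame-rate and instead let the resolution drop could do:
+
+```c++
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_interface.h"
+
+void PreferFramerate(rtc::scoped_refptr<webrtc::RtpSenderInterface> sender) {
+  webrtc::RtpParameters parameters = sender->GetParameters();
+  // MAINTAIN_FRAMERATE adapts resolution and leaves the frame-rate alone.
+  parameters.degradation_preference =
+      webrtc::DegradationPreference::MAINTAIN_FRAMERATE;
+  sender->SetParameters(parameters);
+}
+```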
+
+## VideoSinkWants and video stream adaptation
+
+Once an adaptation is applied, the video stream is notified and converts the
+adaptation into a [VideoSinkWants][VideoSinkWants]. The sink wants indicate to
+the video source that restrictions should be applied to the stream before
+frames are handed to the encoder. `VideoSinkWants` has a few properties; for
+adaptation, the properties that might be set are:
+
+* `target_pixel_count`: The desired number of pixels for each video frame. The
+ actual pixel count should be close to this but does not have to be exact so
+ that aspect ratio can be maintained.
+* `max_pixel_count`: The maximum number of pixels in each video frame. This
+  value cannot be exceeded if set.
+* `max_framerate_fps`: The maximum frame-rate for the video source. The source
+ is expected to drop frames that cause this threshold to be exceeded.
+
+The `VideoSinkWants` can be applied by any video source, or one may use the
+[AdaptedVideoTrackSource][adapted_video_track_source.h], which is a base class
+for sources that need video adaptation.
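+
+As a hedged sketch, a custom source receives the wants through
+[AddOrUpdateSink][AddOrUpdateSink] and applies them in its capture path
+(single sink, no locking; a real source must be thread-safe):
+
+```c++
+#include <limits>
+
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+
+class SketchSource : public rtc::VideoSourceInterface<webrtc::VideoFrame> {
+ public:
+  void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+                       const rtc::VideoSinkWants& wants) override {
+    sink_ = sink;
+    // Remember the restrictions: the capture path should scale frames to at
+    // most max_pixel_count_ pixels and drop frames above max_framerate_fps_.
+    max_pixel_count_ = wants.max_pixel_count;
+    max_framerate_fps_ = wants.max_framerate_fps;
+  }
+  void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override {
+    sink_ = nullptr;
+  }
+
+ private:
+  rtc::VideoSinkInterface<webrtc::VideoFrame>* sink_ = nullptr;
+  int max_pixel_count_ = std::numeric_limits<int>::max();
+  int max_framerate_fps_ = std::numeric_limits<int>::max();
+};
+```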
+
+[RtpParameters]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/rtp_parameters.h?q=%22RTC_EXPORT%20RtpParameters%22
+[resource.h]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/adaptation/resource.h
+[Call::AddAdaptationResource]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/call.h?q=Call::AddAdaptationResource
+[quality_scaler.h]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/utility/quality_scaler.h
+[VideoEncoder::QpThresholds]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video_codecs/video_encoder.h?q=VideoEncoder::QpThresholds
+[EncoderInfo]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video_codecs/video_encoder.h?q=VideoEncoder::EncoderInfo
+[encode_usage_resource.h]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/video/adaptation/encode_usage_resource.h
+[VideoStreamAdapter]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/adaptation/video_stream_adapter.h
+[adaptation_constraint.h]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/adaptation/adaptation_constraint.h
+[bitrate_constraint.h]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/video/adaptation/bitrate_constraint.h
+[AddOrUpdateSink]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video/video_source_interface.h?q=AddOrUpdateSink
+[VideoSinkWants]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video/video_source_interface.h?q=%22RTC_EXPORT%20VideoSinkWants%22
+[adapted_video_track_source.h]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/base/adapted_video_track_source.h
diff --git a/third_party/libwebrtc/video/g3doc/stats.md b/third_party/libwebrtc/video/g3doc/stats.md
new file mode 100644
index 0000000000..7d485a0155
--- /dev/null
+++ b/third_party/libwebrtc/video/g3doc/stats.md
@@ -0,0 +1,215 @@
+<!-- go/cmark -->
+<!--* freshness: {owner: 'asapersson' reviewed: '2021-04-14'} *-->
+
+# Video stats
+
+Overview of collected statistics for [VideoSendStream] and [VideoReceiveStream].
+
+## VideoSendStream
+
+[VideoSendStream::Stats] for a sending stream can be gathered via `VideoSendStream::GetStats()`.
+
+Some statistics are collected per RTP stream (see [StreamStats]) and can be of `StreamType`: `kMedia`, `kRtx`, `kFlexfec`.
+
+Multiple `StreamStats` objects are present if, for example, simulcast is used (multiple `kMedia` objects) or if RTX or FlexFEC is negotiated.
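+
+For orientation, a sketch of polling the stats and walking the per-SSRC map
+(the logging is illustrative; the field names are described below):
+
+```c++
+#include "call/video_send_stream.h"
+#include "rtc_base/logging.h"
+
+void LogMediaSubstreams(webrtc::VideoSendStream* send_stream) {
+  webrtc::VideoSendStream::Stats stats = send_stream->GetStats();
+  for (const auto& [ssrc, substream] : stats.substreams) {
+    using StreamStats = webrtc::VideoSendStream::StreamStats;
+    if (substream.type == StreamStats::StreamType::kMedia) {
+      RTC_LOG(LS_INFO) << "SSRC " << ssrc << ": "
+                       << substream.rtp_stats.transmitted.packets
+                       << " media packets sent";
+    }
+  }
+}
+```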
+
+### SendStatisticsProxy
+`VideoSendStream` owns a [SendStatisticsProxy] which implements
+`VideoStreamEncoderObserver`,
+`RtcpStatisticsCallback`,
+`ReportBlockDataObserver`,
+`RtcpPacketTypeCounterObserver`,
+`StreamDataCountersCallback`,
+`BitrateStatisticsObserver`,
+`FrameCountObserver`,
+`SendSideDelayObserver`
+and holds a `VideoSendStream::Stats` object.
+
+`SendStatisticsProxy` is called via these interfaces by different components (e.g. `RtpRtcp` module) to update stats.
+
+#### StreamStats
+* `type` - kMedia, kRtx or kFlexfec.
+* `referenced_media_ssrc` - only present for type kRtx/kFlexfec. The SSRC of the kMedia stream for which retransmissions or FEC are performed.
+
+Updated when a frame has been encoded, `VideoStreamEncoder::OnEncodedImage`.
+* `frames_encoded` - total number of encoded frames.
+* `encode_frame_rate` - number of encoded frames during the last second.
+* `width` - width of last encoded frame [[rtcoutboundrtpstreamstats-framewidth]].
+* `height` - height of last encoded frame [[rtcoutboundrtpstreamstats-frameheight]].
+* `total_encode_time_ms` - total encode time for encoded frames.
+* `qp_sum` - sum of quantizer values of encoded frames [[rtcoutboundrtpstreamstats-qpsum]].
+* `frame_counts` - total number of encoded key/delta frames [[rtcoutboundrtpstreamstats-keyframesencoded]].
+
+Updated when an RTP packet is transmitted to the network, `RtpSenderEgress::SendPacket`.
+* `rtp_stats` - total number of sent bytes/packets.
+* `total_bitrate_bps` - total bitrate sent in bits per second (over a one second window).
+* `retransmit_bitrate_bps` - total retransmit bitrate sent in bits per second (over a one second window).
+* `avg_delay_ms` - average capture-to-send delay for sent packets (over a one second window).
+* `max_delay_ms` - maximum capture-to-send delay for sent packets (over a one second window).
+* `total_packet_send_delay_ms` - total capture-to-send delay for sent packets [[rtcoutboundrtpstreamstats-totalpacketsenddelay]].
+
+Updated when an incoming RTCP packet is parsed, `RTCPReceiver::ParseCompoundPacket`.
+* `rtcp_packet_type_counts` - total number of received NACK/FIR/PLI packets [rtcoutboundrtpstreamstats-[nackcount], [fircount], [plicount]].
+
+Updated when an RTCP report block packet is received, `RTCPReceiver::TriggerCallbacksFromRtcpPacket`.
+* `rtcp_stats` - RTCP report block data.
+* `report_block_data` - RTCP report block data.
+
+#### Stats
+* `std::map<uint32_t, StreamStats> substreams` - StreamStats mapped per SSRC.
+
+Updated when a frame is received from the source, `VideoStreamEncoder::OnFrame`.
+* `frames` - total number of frames fed to VideoStreamEncoder.
+* `input_frame_rate` - number of frames fed to VideoStreamEncoder during the last second.
+* `frames_dropped_by_congestion_window` - total number of frames dropped due to congestion window pushback.
+* `frames_dropped_by_encoder_queue` - total number of frames dropped because the encoder is blocked.
+
+Updated if a frame from the source is dropped, `VideoStreamEncoder::OnDiscardedFrame`.
+* `frames_dropped_by_capturer` - total number of frames dropped by the source.
+
+Updated if a frame is dropped by `FrameDropper`, `VideoStreamEncoder::MaybeEncodeVideoFrame`.
+* `frames_dropped_by_rate_limiter` - total number of frames dropped to avoid bitrate overuse.
+
+Updated (if changed) before a frame is passed to the encoder, `VideoStreamEncoder::EncodeVideoFrame`.
+* `encoder_implementation_name` - name of encoder implementation [[rtcoutboundrtpstreamstats-encoderimplementation]].
+
+Updated after a frame has been encoded, `VideoStreamEncoder::OnEncodedImage`.
+* `frames_encoded` - total number of encoded frames [[rtcoutboundrtpstreamstats-framesencoded]].
+* `encode_frame_rate` - number of encoded frames during the last second [[rtcoutboundrtpstreamstats-framespersecond]].
+* `total_encoded_bytes_target` - total target frame size in bytes [[rtcoutboundrtpstreamstats-totalencodedbytestarget]].
+* `huge_frames_sent` - total number of huge frames sent [[rtcoutboundrtpstreamstats-hugeframessent]].
+* `media_bitrate_bps` - the actual bitrate the encoder is producing.
+* `avg_encode_time_ms` - average encode time for encoded frames.
+* `total_encode_time_ms` - total encode time for encoded frames [[rtcoutboundrtpstreamstats-totalencodetime]].
+* `frames_dropped_by_encoder` - total number of frames dropped by the encoder.
+
+Adaptation stats.
+* `bw_limited_resolution` - indicates whether resolution is limited due to restricted bandwidth.
+* `cpu_limited_resolution` - indicates whether resolution is limited due to CPU.
+* `bw_limited_framerate` - indicates whether framerate is limited due to restricted bandwidth.
+* `cpu_limited_framerate` - indicates whether framerate is limited due to CPU.
+* `quality_limitation_reason` - current reason for limiting resolution and/or framerate [[rtcoutboundrtpstreamstats-qualitylimitationreason]].
+* `quality_limitation_durations_ms` - total time spent in quality limitation state [[rtcoutboundrtpstreamstats-qualitylimitationdurations]].
+* `quality_limitation_resolution_changes` - total number of times that resolution has changed due to quality limitation [[rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges]].
+* `number_of_cpu_adapt_changes` - total number of times resolution/framerate has changed due to CPU limitation.
+* `number_of_quality_adapt_changes` - total number of times resolution/framerate has changed due to quality limitation.
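+
+For example, the per-reason limitation durations can be read with a short
+loop (a sketch; assumes `stats` was obtained via `GetStats()` as above and
+that `quality_limitation_durations_ms` is a map keyed by
+`QualityLimitationReason`):
+
+```c++
+for (const auto& [reason, duration_ms] :
+     stats.quality_limitation_durations_ms) {
+  RTC_LOG(LS_INFO) << "quality limitation reason "
+                   << static_cast<int>(reason) << ": " << duration_ms
+                   << " ms";
+}
+```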
+
+Updated when the encoder is configured, `VideoStreamEncoder::ReconfigureEncoder`.
+* `content_type` - configured content type (UNSPECIFIED/SCREENSHARE).
+
+Updated when the available bitrate changes, `VideoSendStreamImpl::OnBitrateUpdated`.
+* `target_media_bitrate_bps` - the bitrate the encoder is configured to use.
+* `suspended` - indicates whether video is suspended due to a zero target bitrate.
+
+## VideoReceiveStream
+[VideoReceiveStream::Stats] for a receiving stream can be gathered via `VideoReceiveStream::GetStats()`.
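+
+As on the send side, a minimal polling sketch (assuming `receive_stream` is a
+configured `VideoReceiveStream*`; the logging is illustrative):
+
+```c++
+VideoReceiveStream::Stats stats = receive_stream->GetStats();
+RTC_LOG(LS_INFO) << "network_fps=" << stats.network_frame_rate
+                 << " decode_fps=" << stats.decode_frame_rate
+                 << " render_fps=" << stats.render_frame_rate
+                 << " frames_dropped=" << stats.frames_dropped;
+```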
+
+### ReceiveStatisticsProxy
+`VideoReceiveStream` owns a [ReceiveStatisticsProxy] which implements
+`VCMReceiveStatisticsCallback`,
+`RtcpCnameCallback`,
+`RtcpPacketTypeCounterObserver`,
+`CallStatsObserver`
+and holds a `VideoReceiveStream::Stats` object.
+
+`ReceiveStatisticsProxy` is called via these interfaces by different components (e.g. `RtpRtcp` module) to update stats.
+
+#### Stats
+* `current_payload_type` - current payload type.
+* `ssrc` - configured SSRC for the received stream.
+
+Updated when a complete frame is received, `FrameBuffer::InsertFrame`.
+* `frame_counts` - total number of key/delta frames received [[rtcinboundrtpstreamstats-keyframesdecoded]].
+* `network_frame_rate` - number of frames received during the last second.
+
+Updated when a frame is ready for decoding, `FrameBuffer::GetNextFrame`. From `VCMTiming`:
+* `jitter_buffer_ms` - jitter buffer delay in ms.
+* `max_decode_ms` - the 95th percentile of observed decode times within a 10-second window.
+* `render_delay_ms` - render delay in ms.
+* `min_playout_delay_ms` - minimum playout delay in ms.
+* `target_delay_ms` - target playout delay in ms: Max(`min_playout_delay_ms`, `jitter_delay_ms` + `max_decode_ms` + `render_delay_ms`); see the sketch after this list.
+* `current_delay_ms` - actual playout delay in ms.
+* `jitter_buffer_delay_seconds` - total jitter buffer delay in seconds [[rtcinboundrtpstreamstats-jitterbufferdelay]].
+* `jitter_buffer_emitted_count` - total number of frames that have come out from the jitter buffer [[rtcinboundrtpstreamstats-jitterbufferemittedcount]].
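+
+A small self-contained sketch of the `target_delay_ms` formula above (the
+helper function is hypothetical; the parameter names follow the stats in this
+list):
+
+```c++
+#include <algorithm>
+
+// Target playout delay: never below the configured minimum, and large enough
+// to absorb jitter, decoding and rendering.
+int TargetDelayMs(int min_playout_delay_ms,
+                  int jitter_delay_ms,
+                  int max_decode_ms,
+                  int render_delay_ms) {
+  return std::max(min_playout_delay_ms,
+                  jitter_delay_ms + max_decode_ms + render_delay_ms);
+}
+```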
+
+Updated (if changed) after a frame is passed to the decoder, `VCMGenericDecoder::Decode`.
+* `decoder_implementation_name` - name of decoder implementation [[rtcinboundrtpstreamstats-decoderimplementation]].
+
+Updated when a frame is ready for decoding, `FrameBuffer::GetNextFrame`.
+* `timing_frame_info` - timestamps for a full lifetime of a frame.
+* `first_frame_received_to_decoded_ms` - initial decoding latency, measured from the first received frame to the first decoded frame.
+* `frames_dropped` - total number of frames dropped prior to decoding, or dropped because the system is too slow [[rtcreceivedrtpstreamstats-framesdropped]].
+
+Updated after a frame has been decoded, `VCMDecodedFrameCallback::Decoded`.
+* `frames_decoded` - total number of decoded frames [[rtcinboundrtpstreamstats-framesdecoded]].
+* `decode_frame_rate` - number of decoded frames during the last second [[rtcinboundrtpstreamstats-framespersecond]].
+* `decode_ms` - time to decode last frame in ms.
+* `total_decode_time_ms` - total decode time for decoded frames [[rtcinboundrtpstreamstats-totaldecodetime]].
+* `qp_sum` - sum of quantizer values of decoded frames [[rtcinboundrtpstreamstats-qpsum]].
+* `content_type` - content type (UNSPECIFIED/SCREENSHARE).
+* `interframe_delay_max_ms` - max inter-frame delay within a time window between decoded frames.
+
+Updated before a frame is sent to the renderer, `VideoReceiveStream2::OnFrame`.
+* `frames_rendered` - total number of rendered frames.
+* `render_frame_rate` - number of rendered frames during the last second.
+* `width` - width of last frame fed to renderer [[rtcinboundrtpstreamstats-framewidth]].
+* `height` - height of last frame fed to renderer [[rtcinboundrtpstreamstats-frameheight]].
+* `estimated_playout_ntp_timestamp_ms` - estimated playout NTP timestamp [[rtcinboundrtpstreamstats-estimatedplayouttimestamp]].
+* `sync_offset_ms` - NTP timestamp difference between the last played out audio and video frame.
+* `freeze_count` - total number of detected freezes.
+* `pause_count` - total number of detected pauses.
+* `total_freezes_duration_ms` - total duration of freezes in ms.
+* `total_pauses_duration_ms` - total duration of pauses in ms.
+* `total_inter_frame_delay` - sum of inter-frame delay in seconds between rendered frames [[rtcinboundrtpstreamstats-totalinterframedelay]].
+* `total_squared_inter_frame_delay` - sum of squared inter-frame delays in seconds between rendered frames [[rtcinboundrtpstreamstats-totalsquaredinterframedelay]].
+
+Updated for each received RTP packet, `ReceiveStatisticsImpl::OnRtpPacket`. From `ReceiveStatistics`:
+* `total_bitrate_bps` - incoming bitrate in bps.
+* `rtp_stats` - RTP statistics for the received stream.
+
+Updated when an RTCP packet is sent, `RTCPSender::ComputeCompoundRTCPPacket`.
+* `rtcp_packet_type_counts` - total number of sent NACK/FIR/PLI packets ([nackcount-inbound], [fircount-inbound], [plicount-inbound]).
+
+
+[VideoSendStream]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/video_send_stream.h
+[VideoSendStream::Stats]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/video_send_stream.h?q=VideoSendStream::Stats
+[StreamStats]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/video_send_stream.h?q=VideoSendStream::StreamStats
+[SendStatisticsProxy]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/video/send_statistics_proxy.h
+[rtcoutboundrtpstreamstats-framewidth]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-framewidth
+[rtcoutboundrtpstreamstats-frameheight]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-frameheight
+[rtcoutboundrtpstreamstats-qpsum]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qpsum
+[rtcoutboundrtpstreamstats-keyframesencoded]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-keyframesencoded
+[rtcoutboundrtpstreamstats-totalpacketsenddelay]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalpacketsenddelay
+[nackcount]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-nackcount
+[fircount]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-fircount
+[plicount]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-plicount
+[rtcoutboundrtpstreamstats-encoderimplementation]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-encoderimplementation
+[rtcoutboundrtpstreamstats-framesencoded]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-framesencoded
+[rtcoutboundrtpstreamstats-framespersecond]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-framespersecond
+[rtcoutboundrtpstreamstats-totalencodedbytestarget]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodedbytestarget
+[rtcoutboundrtpstreamstats-hugeframessent]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-hugeframessent
+[rtcoutboundrtpstreamstats-totalencodetime]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodetime
+[rtcoutboundrtpstreamstats-qualitylimitationreason]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason
+[rtcoutboundrtpstreamstats-qualitylimitationdurations]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations
+[rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
+
+[VideoReceiveStream]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/video_receive_stream.h
+[VideoReceiveStream::Stats]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/call/video_receive_stream.h?q=VideoReceiveStream::Stats
+[ReceiveStatisticsProxy]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/video/receive_statistics_proxy2.h
+[rtcinboundrtpstreamstats-keyframesdecoded]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-keyframesdecoded
+[rtcinboundrtpstreamstats-jitterbufferdelay]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay
+[rtcinboundrtpstreamstats-jitterbufferemittedcount]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferemittedcount
+[rtcinboundrtpstreamstats-decoderimplementation]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-decoderimplementation
+[rtcreceivedrtpstreamstats-framesdropped]: https://www.w3.org/TR/webrtc-stats/#dom-rtcreceivedrtpstreamstats-framesdropped
+[rtcinboundrtpstreamstats-framesdecoded]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-framesdecoded
+[rtcinboundrtpstreamstats-framespersecond]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-framespersecond
+[rtcinboundrtpstreamstats-totaldecodetime]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totaldecodetime
+[rtcinboundrtpstreamstats-qpsum]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-qpsum
+[rtcinboundrtpstreamstats-totalinterframedelay]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalinterframedelay
+[rtcinboundrtpstreamstats-totalsquaredinterframedelay]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalsquaredinterframedelay
+[rtcinboundrtpstreamstats-estimatedplayouttimestamp]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp
+[rtcinboundrtpstreamstats-framewidth]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-framewidth
+[rtcinboundrtpstreamstats-frameheight]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-frameheight
+[nackcount-inbound]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-nackcount
+[fircount-inbound]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-fircount
+[plicount-inbound]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-plicount
diff --git a/third_party/libwebrtc/video/pc_full_stack_tests.cc b/third_party/libwebrtc/video/pc_full_stack_tests.cc
new file mode 100644
index 0000000000..83b06830e0
--- /dev/null
+++ b/third_party/libwebrtc/video/pc_full_stack_tests.cc
@@ -0,0 +1,1833 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/media_stream_interface.h"
+#include "api/test/create_network_emulation_manager.h"
+#include "api/test/create_peer_connection_quality_test_frame_generator.h"
+#include "api/test/create_peerconnection_quality_test_fixture.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/network_emulation_manager.h"
+#include "api/test/pclf/media_configuration.h"
+#include "api/test/pclf/media_quality_test_params.h"
+#include "api/test/pclf/peer_configurer.h"
+#include "api/test/peerconnection_quality_test_fixture.h"
+#include "api/test/simulated_network.h"
+#include "api/test/time_controller.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "call/simulated_network.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/pc/e2e/network_quality_metrics_reporter.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+
+using ::webrtc::webrtc_pc_e2e::AudioConfig;
+using ::webrtc::webrtc_pc_e2e::EmulatedSFUConfig;
+using ::webrtc::webrtc_pc_e2e::PeerConfigurer;
+using ::webrtc::webrtc_pc_e2e::RunParams;
+using ::webrtc::webrtc_pc_e2e::ScreenShareConfig;
+using ::webrtc::webrtc_pc_e2e::VideoCodecConfig;
+using ::webrtc::webrtc_pc_e2e::VideoConfig;
+using ::webrtc::webrtc_pc_e2e::VideoSimulcastConfig;
+
+namespace {
+
+constexpr int kTestDurationSec = 45;
+
+std::unique_ptr<webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture>
+CreateTestFixture(const std::string& test_case_name,
+ TimeController& time_controller,
+ std::pair<EmulatedNetworkManagerInterface*,
+ EmulatedNetworkManagerInterface*> network_links,
+ rtc::FunctionView<void(PeerConfigurer*)> alice_configurer,
+ rtc::FunctionView<void(PeerConfigurer*)> bob_configurer) {
+ auto fixture = webrtc_pc_e2e::CreatePeerConnectionE2EQualityTestFixture(
+ test_case_name, time_controller, /*audio_quality_analyzer=*/nullptr,
+ /*video_quality_analyzer=*/nullptr);
+ auto alice = std::make_unique<PeerConfigurer>(
+ network_links.first->network_dependencies());
+ auto bob = std::make_unique<PeerConfigurer>(
+ network_links.second->network_dependencies());
+ alice_configurer(alice.get());
+ bob_configurer(bob.get());
+ fixture->AddPeer(std::move(alice));
+ fixture->AddPeer(std::move(bob));
+ fixture->AddQualityMetricsReporter(
+ std::make_unique<webrtc_pc_e2e::NetworkQualityMetricsReporter>(
+ network_links.first, network_links.second,
+ test::GetGlobalMetricsLogger()));
+ return fixture;
+}
+
+// Takes the currently active set of field trials and appends the given trials.
+std::string AppendFieldTrials(std::string new_trial_string) {
+ return std::string(field_trial::GetFieldTrialString()) + new_trial_string;
+}
+
+std::string ClipNameToClipPath(const char* clip_name) {
+ return test::ResourcePath(clip_name, "yuv");
+}
+
+} // namespace
+
+struct PCFullStackTestParams {
+ bool use_network_thread_as_worker_thread = false;
+ std::string field_trials;
+ std::string test_case_name_postfix;
+};
+
+std::vector<PCFullStackTestParams> ParameterizedTestParams() {
+ return {// Run with default parameters and field trials.
+ {},
+ // Use the network thread as worker thread.
+ // Use the worker thread for sending packets.
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=14502
+ {.use_network_thread_as_worker_thread = true,
+ .field_trials = "WebRTC-SendPacketsOnWorkerThread/Enabled/",
+ .test_case_name_postfix = "_ReducedThreads"}};
+}
+
+class ParameterizedPCFullStackTest
+ : public ::testing::TestWithParam<PCFullStackTestParams> {
+ public:
+ ParameterizedPCFullStackTest() : field_trials_(GetParam().field_trials) {}
+
+ private:
+ test::ScopedFieldTrials field_trials_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ ParameterizedPCFullStackTest,
+ ParameterizedPCFullStackTest,
+ testing::ValuesIn(ParameterizedTestParams()),
+ [](const testing::TestParamInfo<PCFullStackTestParams>& info) {
+ if (info.param.test_case_name_postfix.empty())
+ return std::string("Default");
+ return info.param.test_case_name_postfix;
+ });
+
+#if defined(RTC_ENABLE_VP9)
+TEST(PCFullStackTest, Pc_Foreman_Cif_Net_Delay_0_0_Plr_0_VP9) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_net_delay_0_0_plr_0_VP9",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_Delay_50_0_Plr_5_VP9_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_VP9_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+// VP9 profile 2 isn't supported on Android ARM/ARM64 or on iOS ARM64.
+#if (defined(WEBRTC_ANDROID) && \
+ (defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM))) || \
+ (defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_ARM64))
+#define MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2 \
+ DISABLED_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2
+#else
+#define MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2 \
+ Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2
+#endif
+TEST(PCFullStackTest, MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_generator_net_delay_0_0_plr_0_VP9Profile2",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateSquareFrameGenerator(
+ video, test::FrameGeneratorInterface::OutputType::kI010);
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile2)}})});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile2)}})});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+/*
+// TODO(bugs.webrtc.org/10639): Migrate these commented-out tests once the
+// required functionality is supported in the PeerConnection-level framework.
+TEST(PCFullStackTest, ForemanCifWithoutPacketLossMultiplexI420Frame) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 700000, 700000, 700000, false,
+ "multiplex", 1, 0, 0,
+ false, false, false, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
+ kTestDurationSec};
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+
+TEST(PCFullStackTest, GeneratorWithoutPacketLossMultiplexI420AFrame) {
+ auto fixture = CreateVideoQualityTestFixture();
+
+ ParamsWithLogging generator;
+ generator.call.send_side_bwe = true;
+ generator.video[0] = {
+ true, 352, 288, 30, 700000, 700000, 700000, false,
+ "multiplex", 1, 0, 0, false, false, false, "GeneratorI420A"};
+ generator.analyzer = {"generator_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
+ kTestDurationSec};
+ fixture->RunWithAnalyzer(generator);
+}
+*/
+#endif // defined(RTC_ENABLE_VP9)
+
+TEST(PCFullStackTest, Pc_Net_Delay_0_0_Plr_0) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_net_delay_0_0_plr_0", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(176, 144, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("paris_qcif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_Net_Delay_0_0_Plr_0_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_net_delay_0_0_plr_0_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 10);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+
+ BitrateSettings bitrate_settings;
+ bitrate_settings.min_bitrate_bps = 30000;
+ bitrate_settings.start_bitrate_bps = 30000;
+ bitrate_settings.max_bitrate_bps = 30000;
+ alice->SetBitrateSettings(bitrate_settings);
+ },
+ [](PeerConfigurer* bob) {});
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ fixture->Run(std::move(run_params));
+}
+
+// Link capacity below default start rate.
+TEST(PCFullStackTest, Pc_Foreman_Cif_Link_150kbps_Net_Delay_0_0_Plr_0) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.link_capacity_kbps = 150;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_link_150kbps_net_delay_0_0_plr_0",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_Link_130kbps_Delay100ms_Loss1_Ulpfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.link_capacity_kbps = 130;
+ config.queue_delay_ms = 100;
+ config.loss_percent = 1;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_link_130kbps_delay100ms_loss1_ulpfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetUseUlpFEC(true);
+ },
+ [](PeerConfigurer* bob) { bob->SetUseUlpFEC(true); });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_Link_50kbps_Delay100ms_Loss1_Ulpfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.link_capacity_kbps = 50;
+ config.queue_delay_ms = 100;
+ config.loss_percent = 1;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_link_50kbps_delay100ms_loss1_ulpfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetUseUlpFEC(true);
+ },
+ [](PeerConfigurer* bob) { bob->SetUseUlpFEC(true); });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+// Restricted network and encoder overproducing by 30%.
+TEST(PCFullStackTest,
+ Pc_Foreman_Cif_Link_150kbps_Delay100ms_30pkts_Queue_Overshoot30) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.link_capacity_kbps = 150;
+ config.queue_length_packets = 30;
+ config.queue_delay_ms = 100;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_link_150kbps_delay100ms_30pkts_queue_overshoot30",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoEncoderBitrateMultiplier(1.30);
+ },
+ [](PeerConfigurer* bob) { bob->SetVideoEncoderBitrateMultiplier(1.30); });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+// Weak 3G-style link: 250kbps, 1% loss, 100ms delay, 10 packets queue.
+// Packet rate and loss are low enough that losses occur at ~3 s intervals.
+// This triggers protection overhead to toggle between zero and non-zero.
+// Link queue is restrictive enough to trigger loss on probes.
+TEST(PCFullStackTest, Pc_Foreman_Cif_Link_250kbps_Delay100ms_10pkts_Loss1) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.link_capacity_kbps = 250;
+ config.queue_length_packets = 10;
+ config.queue_delay_ms = 100;
+ config.loss_percent = 1;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_link_250kbps_delay100ms_10pkts_loss1",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoEncoderBitrateMultiplier(1.30);
+ },
+ [](PeerConfigurer* bob) { bob->SetVideoEncoderBitrateMultiplier(1.30); });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_Delay_50_0_Plr_5_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_Delay_50_0_Plr_5_Ulpfec_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_ulpfec_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetUseUlpFEC(true);
+ },
+ [](PeerConfigurer* bob) { bob->SetUseUlpFEC(true); });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_Delay_50_0_Plr_5_Flexfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_flexfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetUseFlexFEC(true);
+ },
+ [](PeerConfigurer* bob) { bob->SetUseFlexFEC(true); });
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.enable_flex_fec_support = true;
+ fixture->Run(std::move(run_params));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_Delay_50_0_Plr_3_Flexfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 3;
+ config.link_capacity_kbps = 500;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_500kbps_delay_50_0_plr_3_flexfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetUseFlexFEC(true);
+ },
+ [](PeerConfigurer* bob) { bob->SetUseFlexFEC(true); });
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.enable_flex_fec_support = true;
+ fixture->Run(std::move(run_params));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_Delay_50_0_Plr_3_Ulpfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 3;
+ config.link_capacity_kbps = 500;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_500kbps_delay_50_0_plr_3_ulpfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetUseUlpFEC(true);
+ },
+ [](PeerConfigurer* bob) { bob->SetUseUlpFEC(true); });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+#if defined(WEBRTC_USE_H264)
+TEST(PCFullStackTest, Pc_Foreman_Cif_Net_Delay_0_0_Plr_0_H264) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_net_delay_0_0_plr_0_H264",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_H264) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_H264",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 10);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+
+ BitrateSettings bitrate_settings;
+ bitrate_settings.min_bitrate_bps = 30000;
+ bitrate_settings.start_bitrate_bps = 30000;
+ bitrate_settings.max_bitrate_bps = 30000;
+ alice->SetBitrateSettings(bitrate_settings);
+ alice->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_H264_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Sps_Pps_Idr) {
+ test::ScopedFieldTrials override_field_trials(
+ AppendFieldTrials("WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_H264_sps_pps_idr",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Flexfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_H264_flexfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ alice->SetUseFlexFEC(true);
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ bob->SetUseFlexFEC(true);
+ });
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.enable_flex_fec_support = true;
+ fixture->Run(std::move(run_params));
+}
+
+// Ulpfec with H264 is an unsupported combination, so this test is only useful
+// for debugging. It is therefore disabled by default.
+TEST(PCFullStackTest, DISABLED_Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Ulpfec) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = 50;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_delay_50_0_plr_5_H264_ulpfec",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ alice->SetUseUlpFEC(true);
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(cricket::kH264CodecName)});
+ bob->SetUseUlpFEC(true);
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+#endif // defined(WEBRTC_USE_H264)
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 0;
+ config.queue_delay_ms = 0;
+ config.link_capacity_kbps = 500;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_500kbps", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST_P(ParameterizedPCFullStackTest, Pc_Foreman_Cif_500kbps_32pkts_Queue) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 32;
+ config.queue_delay_ms = 0;
+ config.link_capacity_kbps = 500;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_500kbps_32pkts_queue" + GetParam().test_case_name_postfix,
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ if (GetParam().use_network_thread_as_worker_thread) {
+ alice->SetUseNetworkThreadAsWorkerThread();
+ }
+ },
+ [](PeerConfigurer* bob) {
+ if (GetParam().use_network_thread_as_worker_thread) {
+ bob->SetUseNetworkThreadAsWorkerThread();
+ }
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_100ms) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 0;
+ config.queue_delay_ms = 100;
+ config.link_capacity_kbps = 500;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_500kbps_100ms",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCGenericDescriptorTest,
+ Pc_Foreman_Cif_500kbps_100ms_32pkts_Queue_Generic_Descriptor) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 32;
+ config.queue_delay_ms = 100;
+ config.link_capacity_kbps = 500;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_500kbps_100ms_32pkts_queue_generic_descriptor",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+/*
+// TODO(bugs.webrtc.org/10639): This test needs send-side BWE disabled, which
+// isn't supported in the PC-level framework.
+TEST(PCFullStackTest, ForemanCif500kbps100msLimitedQueueRecvBwe) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging foreman_cif;
+ foreman_cif.call.send_side_bwe = false;
+ foreman_cif.video[0] = {
+ true, 352, 288, 30,
+ 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0,
+ false, false, true, ClipNameToClipPath("foreman_cif")};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_100ms_32pkts_queue_recv_bwe",
+ 0.0, 0.0, kTestDurationSec};
+ foreman_cif.config->queue_length_packets = 32;
+ foreman_cif.config->queue_delay_ms = 100;
+ foreman_cif.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(foreman_cif);
+}
+*/
+
+TEST(PCFullStackTest, Pc_Foreman_Cif_1000kbps_100ms_32pkts_Queue) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 32;
+ config.queue_delay_ms = 100;
+ config.link_capacity_kbps = 1000;
+ auto fixture = CreateTestFixture(
+ "pc_foreman_cif_1000kbps_100ms_32pkts_queue",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(352, 288, 30);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("foreman_cif"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+// TODO(sprang): Remove this if we keep the similar ModerateLimits tests below?
+TEST(PCFullStackTest, Pc_Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 32;
+ config.queue_delay_ms = 100;
+ config.link_capacity_kbps = 2000;
+ auto fixture = CreateTestFixture(
+ "pc_conference_motion_hd_2000kbps_100ms_32pkts_queue",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1280, 720, 50);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("ConferenceMotion_1280_720_50"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+/*
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 2,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {
+ "conference_motion_hd_2tl_moderate_limits_generic_descriptor", 0.0, 0.0,
+ kTestDurationSec};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ conf_motion_hd.call.generic_descriptor = GenericDescriptorEnabled();
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ConferenceMotionHd3TLModerateLimits) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 3,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_3tl_moderate_limits", 0.0,
+ 0.0, kTestDurationSec};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ConferenceMotionHd4TLModerateLimits) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 4,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_4tl_moderate_limits", 0.0,
+ 0.0, kTestDurationSec};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ConferenceMotionHd3TLModerateLimitsAltTLPattern) {
+ test::ScopedFieldTrials field_trial(
+ AppendFieldTrials("WebRTC-UseShortVP8TL3Pattern/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 3,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {"conference_motion_hd_3tl_alt_moderate_limits",
+ 0.0, 0.0, kTestDurationSec};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest,
+ ConferenceMotionHd3TLModerateLimitsAltTLPatternAndBaseHeavyTLAllocation) {
+ auto fixture = CreateVideoQualityTestFixture();
+ test::ScopedFieldTrials field_trial(
+ AppendFieldTrials("WebRTC-UseShortVP8TL3Pattern/Enabled/"
+ "WebRTC-UseBaseHeavyVP8TL3RateAllocation/Enabled/"));
+ ParamsWithLogging conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video[0] = {
+ true, 1280,
+ 720, 50,
+ 30000, 3000000,
+ 3000000, false,
+ "VP8", 3,
+ -1, 0,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+ conf_motion_hd.analyzer = {
+ "conference_motion_hd_3tl_alt_heavy_moderate_limits", 0.0, 0.0,
+ kTestDurationSec};
+ conf_motion_hd.config->queue_length_packets = 50;
+ conf_motion_hd.config->loss_percent = 3;
+ conf_motion_hd.config->queue_delay_ms = 100;
+ conf_motion_hd.config->link_capacity_kbps = 2000;
+ fixture->RunWithAnalyzer(conf_motion_hd);
+}
+*/
+
+#if defined(RTC_ENABLE_VP9)
+TEST_P(ParameterizedPCFullStackTest,
+ Pc_Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue_Vp9) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 32;
+ config.queue_delay_ms = 100;
+ config.link_capacity_kbps = 2000;
+ auto fixture = CreateTestFixture(
+ "pc_conference_motion_hd_2000kbps_100ms_32pkts_queue_vp9" +
+ GetParam().test_case_name_postfix,
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1280, 720, 50);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("ConferenceMotion_1280_720_50"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ if (GetParam().use_network_thread_as_worker_thread) {
+ alice->SetUseNetworkThreadAsWorkerThread();
+ }
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ if (GetParam().use_network_thread_as_worker_thread) {
+ bob->SetUseNetworkThreadAsWorkerThread();
+ }
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+#endif
+
+TEST(PCFullStackTest, Pc_Screenshare_Slides_No_Conference_Mode) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_screenshare_slides_no_conference_mode",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1850, 1110, 5);
+ video.stream_label = "alice-video";
+ video.content_hint = VideoTrackInterface::ContentHint::kText;
+ auto frame_generator = CreateScreenShareFrameGenerator(
+ video, ScreenShareConfig(TimeDelta::Seconds(10)));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Screenshare_Slides) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_screenshare_slides", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1850, 1110, 5);
+ video.stream_label = "alice-video";
+ video.content_hint = VideoTrackInterface::ContentHint::kText;
+ auto frame_generator = CreateScreenShareFrameGenerator(
+ video, ScreenShareConfig(TimeDelta::Seconds(10)));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.use_conference_mode = true;
+ fixture->Run(std::move(run_params));
+}
+
+// TODO(bugs.webrtc.org/9840): Investigate why this test is flaky on Win/Mac.
+#if !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN)
+TEST(PCFullStackTest, Pc_Screenshare_Slides_Simulcast_No_Conference_Mode) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_screenshare_slides_simulcast_no_conference_mode",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1850, 1110, 30);
+ video.simulcast_config = VideoSimulcastConfig(2);
+ video.emulated_sfu_config = EmulatedSFUConfig(1);
+ video.temporal_layers_count = 2;
+ video.stream_label = "alice-video";
+ video.content_hint = VideoTrackInterface::ContentHint::kText;
+ auto frame_generator = CreateScreenShareFrameGenerator(
+ video, ScreenShareConfig(TimeDelta::Seconds(10)));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST_P(ParameterizedPCFullStackTest, Pc_Screenshare_Slides_Simulcast) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_screenshare_slides_simulcast" + GetParam().test_case_name_postfix,
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1850, 1110, 30);
+ video.simulcast_config = VideoSimulcastConfig(2);
+ video.emulated_sfu_config = EmulatedSFUConfig(1);
+ video.temporal_layers_count = 2;
+ video.stream_label = "alice-video";
+ video.content_hint = VideoTrackInterface::ContentHint::kText;
+ auto frame_generator = CreateScreenShareFrameGenerator(
+ video, ScreenShareConfig(TimeDelta::Seconds(10)));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ if (GetParam().use_network_thread_as_worker_thread) {
+ alice->SetUseNetworkThreadAsWorkerThread();
+ }
+ },
+ [](PeerConfigurer* bob) {
+ if (GetParam().use_network_thread_as_worker_thread) {
+ bob->SetUseNetworkThreadAsWorkerThread();
+ }
+ });
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.use_conference_mode = true;
+ fixture->Run(std::move(run_params));
+}
+#endif // !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN)
+
+/*
+#if !defined(WEBRTC_MAC)
+// TODO(bugs.webrtc.org/9840): Investigate why this test is flaky on Win/Mac.
+#if !defined(WEBRTC_WIN)
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_low) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.video[0] = {true, 1850, 1110, 30, 800000, 2500000,
+ 2500000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.analyzer = {"screenshare_slides_simulcast_low", 0.0, 0.0,
+ kTestDurationSec};
+ VideoQualityTest::Params screenshare_params_high;
+ screenshare_params_high.video[0] = {
+ true, 1850, 1110, 60, 600000, 1250000, 1250000, false,
+ "VP8", 2, 0, 400000, false, false, false, ""};
+ VideoQualityTest::Params screenshare_params_low;
+ screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000,
+ 1000000, false, "VP8", 2, 0, 400000,
+ false, false, false, ""};
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)};
+ screenshare.ss[0] = {
+ streams, 0, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+#endif // !defined(WEBRTC_WIN)
+#endif // !defined(WEBRTC_MAC)
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Scroll) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging config;
+ config.call.send_side_bwe = true;
+ config.video[0] = {true, 1850, 1110 / 2, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ config.screenshare[0] = {true, false, 10, 2};
+ config.analyzer = {"screenshare_slides_scrolling", 0.0, 0.0,
+ kTestDurationSec};
+ fixture->RunWithAnalyzer(config);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCGenericDescriptorTest, Screenshare_Slides_Lossy_Net_Generic_Descriptor) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_net_generic_descriptor",
+ 0.0, 0.0, kTestDurationSec};
+ screenshare.config->loss_percent = 5;
+ screenshare.config->queue_delay_ms = 200;
+ screenshare.config->link_capacity_kbps = 500;
+ screenshare.call.generic_descriptor = true;
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_VeryLossyNet) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_very_lossy", 0.0, 0.0,
+ kTestDurationSec};
+ screenshare.config->loss_percent = 10;
+ screenshare.config->queue_delay_ms = 200;
+ screenshare.config->link_capacity_kbps = 500;
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_LossyNetRestrictedQueue) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_limited", 0.0, 0.0,
+ kTestDurationSec};
+ screenshare.config->loss_percent = 5;
+ screenshare.config->link_capacity_kbps = 200;
+ screenshare.config->queue_length_packets = 30;
+
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_ModeratelyRestricted) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video[0] = {true, 1850, 1110, 5, 50000, 200000,
+ 1000000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+ screenshare.screenshare[0] = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_moderately_restricted", 0.0, 0.0,
+ kTestDurationSec};
+ screenshare.config->loss_percent = 1;
+ screenshare.config->link_capacity_kbps = 1200;
+ screenshare.config->queue_length_packets = 30;
+
+ fixture->RunWithAnalyzer(screenshare);
+}
+
+namespace {
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+// Since ParamsWithLogging::Video is not trivially destructible, we can't
+// store these structs as const globals.
+ParamsWithLogging::Video SvcVp9Video() {
+ return ParamsWithLogging::Video{
+ true, 1280,
+ 720, 30,
+ 800000, 2500000,
+ 2500000, false,
+ "VP9", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+ParamsWithLogging::Video SimulcastVp8VideoHigh() {
+ return ParamsWithLogging::Video{
+ true, 1280,
+ 720, 30,
+ 800000, 2500000,
+ 2500000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+ParamsWithLogging::Video SimulcastVp8VideoMedium() {
+ return ParamsWithLogging::Video{
+ true, 640,
+ 360, 30,
+ 150000, 500000,
+ 700000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+
+ParamsWithLogging::Video SimulcastVp8VideoLow() {
+ return ParamsWithLogging::Video{
+ true, 320,
+ 180, 30,
+ 30000, 150000,
+ 200000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+}
+} // namespace
+*/
+
+#if defined(RTC_ENABLE_VP9)
+
+TEST(PCFullStackTest, Pc_Screenshare_Slides_Vp9_3sl_High_Fps) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9InterLayerPred/"
+ "Enabled,inter_layer_pred_mode:on/"));
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_screenshare_slides_vp9_3sl_high_fps",
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1850, 1110, 30);
+ video.stream_label = "alice-video";
+ video.simulcast_config = VideoSimulcastConfig(3);
+ video.emulated_sfu_config = EmulatedSFUConfig(2);
+ video.content_hint = VideoTrackInterface::ContentHint::kText;
+ auto frame_generator = CreateScreenShareFrameGenerator(
+ video, ScreenShareConfig(TimeDelta::Seconds(10)));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Vp9svc_3sl_High) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9InterLayerPred/"
+ "Enabled,inter_layer_pred_mode:on/"));
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_vp9svc_3sl_high", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1280, 720, 30);
+ video.stream_label = "alice-video";
+ video.simulcast_config = VideoSimulcastConfig(3);
+ video.emulated_sfu_config = EmulatedSFUConfig(2);
+ video.temporal_layers_count = 3;
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("ConferenceMotion_1280_720_50"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Vp9svc_3sl_Low) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9InterLayerPred/"
+ "Enabled,inter_layer_pred_mode:on/"));
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ auto fixture = CreateTestFixture(
+ "pc_vp9svc_3sl_low", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(
+ BuiltInNetworkBehaviorConfig()),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1280, 720, 30);
+ video.stream_label = "alice-video";
+ video.simulcast_config = VideoSimulcastConfig(3);
+ video.emulated_sfu_config = EmulatedSFUConfig(0);
+ video.temporal_layers_count = 3;
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("ConferenceMotion_1280_720_50"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ alice->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ },
+ [](PeerConfigurer* bob) {
+ bob->SetVideoCodecs({VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile0)}})});
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+#endif // defined(RTC_ENABLE_VP9)
+
+/*
+// bugs.webrtc.org/9506
+#if !defined(WEBRTC_MAC)
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, VP9KSVC_3SL_High) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_high", 0.0, 0.0, kTestDurationSec};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 2, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, VP9KSVC_3SL_Medium) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_medium", 0.0, 0.0, kTestDurationSec};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 1, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, VP9KSVC_3SL_Low) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_low", 0.0, 0.0, kTestDurationSec};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, 0, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, VP9KSVC_3SL_Medium_Network_Restricted) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_medium_network_restricted", 0.0, 0.0,
+ kTestDurationSec};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, -1, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ simulcast.config->link_capacity_kbps = 1000;
+ simulcast.config->queue_delay_ms = 100;
+ fixture->RunWithAnalyzer(simulcast);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+// TODO(webrtc:9722): Remove when experiment is cleaned up.
+TEST(PCFullStackTest, VP9KSVC_3SL_Medium_Network_Restricted_Trusted_Rate) {
+ webrtc::test::ScopedFieldTrials override_trials(
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video[0] = SvcVp9Video();
+ simulcast.analyzer = {"vp9ksvc_3sl_medium_network_restricted_trusted_rate",
+ 0.0, 0.0, kTestDurationSec};
+ simulcast.ss[0] = {
+ std::vector<VideoStream>(), 0, 3, -1, InterLayerPredMode::kOnKeyPic,
+ std::vector<SpatialLayer>(), false};
+ simulcast.config->link_capacity_kbps = 1000;
+ simulcast.config->queue_delay_ms = 100;
+ fixture->RunWithAnalyzer(simulcast);
+}
+#endif // !defined(WEBRTC_MAC)
+
+#endif // defined(RTC_ENABLE_VP9)
+*/
+
+// Android bots can't handle FullHD, so disable the test.
+// TODO(bugs.webrtc.org/9220): Investigate source of flakiness on Mac.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC)
+#define MAYBE_Pc_Simulcast_HD_High DISABLED_Pc_Simulcast_HD_High
+#else
+#define MAYBE_Pc_Simulcast_HD_High Pc_Simulcast_HD_High
+#endif
+TEST(PCFullStackTest, MAYBE_Pc_Simulcast_HD_High) {
+ webrtc::test::ScopedFieldTrials override_trials(AppendFieldTrials(
+ "WebRTC-ForceSimulatedOveruseIntervalMs/1000-50000-300/"));
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 0;
+ config.queue_delay_ms = 100;
+ auto fixture = CreateTestFixture(
+ "pc_simulcast_HD_high", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1920, 1080, 30);
+ video.simulcast_config = VideoSimulcastConfig(3);
+ video.emulated_sfu_config = EmulatedSFUConfig(2);
+ video.temporal_layers_count = 3;
+ video.stream_label = "alice-video";
+ alice->AddVideoConfig(std::move(video));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST_P(ParameterizedPCFullStackTest, Pc_Simulcast_Vp8_3sl_High) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 0;
+ config.queue_delay_ms = 100;
+ auto fixture = CreateTestFixture(
+ "pc_simulcast_vp8_3sl_high" + GetParam().test_case_name_postfix,
+ *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1280, 720, 30);
+ video.simulcast_config = VideoSimulcastConfig(3);
+ video.emulated_sfu_config = EmulatedSFUConfig(2);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("ConferenceMotion_1280_720_50"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ if (GetParam().use_network_thread_as_worker_thread) {
+ alice->SetUseNetworkThreadAsWorkerThread();
+ }
+ },
+ [](PeerConfigurer* bob) {
+ if (GetParam().use_network_thread_as_worker_thread) {
+ bob->SetUseNetworkThreadAsWorkerThread();
+ }
+ });
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+TEST(PCFullStackTest, Pc_Simulcast_Vp8_3sl_Low) {
+ std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
+ CreateNetworkEmulationManager();
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 0;
+ config.queue_delay_ms = 100;
+ auto fixture = CreateTestFixture(
+ "pc_simulcast_vp8_3sl_low", *network_emulation_manager->time_controller(),
+ network_emulation_manager->CreateEndpointPairWithTwoWayRoutes(config),
+ [](PeerConfigurer* alice) {
+ VideoConfig video(1280, 720, 30);
+ video.simulcast_config = VideoSimulcastConfig(3);
+ video.emulated_sfu_config = EmulatedSFUConfig(0);
+ video.stream_label = "alice-video";
+ auto frame_generator = CreateFromYuvFileFrameGenerator(
+ video, ClipNameToClipPath("ConferenceMotion_1280_720_50"));
+ alice->AddVideoConfig(std::move(video), std::move(frame_generator));
+ },
+ [](PeerConfigurer* bob) {});
+ fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
+}
+
+/*
+// This test assumes ideal network conditions, with the target bandwidth
+// available, and exercises WebRTC calls with a high target bitrate (100 Mbps).
+// Android32 bots can't handle this high bitrate, so the test is disabled for
+// them.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_HighBitrateWithFakeCodec DISABLED_HighBitrateWithFakeCodec
+#else
+#define MAYBE_HighBitrateWithFakeCodec HighBitrateWithFakeCodec
+#endif // defined(WEBRTC_ANDROID)
+// TODO(bugs.webrtc.org/10639) Disabled because target bitrate can't be
+// configured yet.
+TEST(PCFullStackTest, MAYBE_HighBitrateWithFakeCodec) {
+  auto fixture = CreateVideoQualityTestFixture();
+  const int target_bitrate = 100000000;
+ ParamsWithLogging generator;
+ generator.call.send_side_bwe = true;
+ generator.call.call_bitrate_config.min_bitrate_bps = target_bitrate;
+ generator.call.call_bitrate_config.start_bitrate_bps = target_bitrate;
+ generator.call.call_bitrate_config.max_bitrate_bps = target_bitrate;
+ generator.video[0] = {true,
+ 360,
+ 240,
+ 30,
+ target_bitrate / 2,
+ target_bitrate,
+ target_bitrate * 2,
+ false,
+ "FakeCodec",
+ 1,
+ 0,
+ 0,
+ false,
+ false,
+ false,
+ "Generator"};
+ generator.analyzer = {"high_bitrate_with_fake_codec", 0.0, 0.0,
+ kTestDurationSec};
+ fixture->RunWithAnalyzer(generator);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, LargeRoomVP8_5thumb) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video[0] = SimulcastVp8VideoHigh();
+ large_room.analyzer = {"largeroom_5thumb", 0.0, 0.0, kTestDurationSec};
+ large_room.config->loss_percent = 0;
+ large_room.config->queue_delay_ms = 100;
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+ large_room.call.num_thumbnails = 5;
+ large_room.ss[0] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(large_room);
+}
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+// Fails on mobile devices:
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=7301
+#define MAYBE_LargeRoomVP8_50thumb DISABLED_LargeRoomVP8_50thumb
+#define MAYBE_LargeRoomVP8_15thumb DISABLED_LargeRoomVP8_15thumb
+#else
+#define MAYBE_LargeRoomVP8_50thumb LargeRoomVP8_50thumb
+#define MAYBE_LargeRoomVP8_15thumb LargeRoomVP8_15thumb
+#endif
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, MAYBE_LargeRoomVP8_15thumb) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video[0] = SimulcastVp8VideoHigh();
+ large_room.analyzer = {"largeroom_15thumb", 0.0, 0.0, kTestDurationSec};
+ large_room.config->loss_percent = 0;
+ large_room.config->queue_delay_ms = 100;
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+ large_room.call.num_thumbnails = 15;
+ large_room.ss[0] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(large_room);
+}
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST(PCFullStackTest, MAYBE_LargeRoomVP8_50thumb) {
+ auto fixture = CreateVideoQualityTestFixture();
+ ParamsWithLogging large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video[0] = SimulcastVp8VideoHigh();
+ large_room.analyzer = {"largeroom_50thumb", 0.0, 0.0, kTestDurationSec};
+ large_room.config->loss_percent = 0;
+ large_room.config->queue_delay_ms = 100;
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+ large_room.call.num_thumbnails = 50;
+ large_room.ss[0] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+ fixture->RunWithAnalyzer(large_room);
+}
+*/
+
+/*
+class PCDualStreamsTest : public ::testing::TestWithParam<int> {};
+
+// Disable the dual video test on mobile devices because it's too heavy.
+// TODO(bugs.webrtc.org/9840): Investigate why this test is flaky on Mac.
+#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) && !defined(WEBRTC_MAC)
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST_P(PCDualStreamsTest,
+ ModeratelyRestricted_SlidesVp8_2TL_Simulcast_Video_Simulcast_High) {
+ const int first_stream = GetParam();
+ ParamsWithLogging dual_streams;
+
+ // Screenshare Settings.
+ dual_streams.screenshare[first_stream] = {true, false, 10};
+ dual_streams.video[first_stream] = {true, 1850, 1110, 5, 800000, 2500000,
+ 2500000, false, "VP8", 2, 1, 400000,
+ false, false, false, ""};
+
+ ParamsWithLogging screenshare_params_high;
+ screenshare_params_high.video[0] = {
+ true, 1850, 1110, 60, 600000, 1250000, 1250000, false,
+ "VP8", 2, 0, 400000, false, false, false, ""};
+ VideoQualityTest::Params screenshare_params_low;
+ screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000,
+ 1000000, false, "VP8", 2, 0, 400000,
+ false, false, false, ""};
+  std::vector<VideoStream> screenshare_streams = {
+ VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)};
+
+ dual_streams.ss[first_stream] = {
+      screenshare_streams, 1, 1, 0, InterLayerPredMode::kOn,
+ std::vector<SpatialLayer>(), false};
+
+ // Video settings.
+ dual_streams.video[1 - first_stream] = SimulcastVp8VideoHigh();
+
+ ParamsWithLogging video_params_high;
+ video_params_high.video[0] = SimulcastVp8VideoHigh();
+ ParamsWithLogging video_params_medium;
+ video_params_medium.video[0] = SimulcastVp8VideoMedium();
+ ParamsWithLogging video_params_low;
+ video_params_low.video[0] = SimulcastVp8VideoLow();
+ std::vector<VideoStream> streams = {
+ VideoQualityTest::DefaultVideoStream(video_params_low, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
+ VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
+
+ dual_streams.ss[1 - first_stream] = {
+ streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
+ false};
+
+ // Call settings.
+ dual_streams.call.send_side_bwe = true;
+ dual_streams.call.dual_video = true;
+ std::string test_label = "dualstreams_moderately_restricted_screenshare_" +
+ std::to_string(first_stream);
+ dual_streams.analyzer = {test_label, 0.0, 0.0, kTestDurationSec};
+ dual_streams.config->loss_percent = 1;
+ dual_streams.config->link_capacity_kbps = 7500;
+ dual_streams.config->queue_length_packets = 30;
+ dual_streams.config->queue_delay_ms = 100;
+
+ auto fixture = CreateVideoQualityTestFixture();
+ fixture->RunWithAnalyzer(dual_streams);
+}
+#endif // !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) &&
+ // !defined(WEBRTC_MAC)
+
+// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
+TEST_P(PCDualStreamsTest, Conference_Restricted) {
+ const int first_stream = GetParam();
+ ParamsWithLogging dual_streams;
+
+ // Screenshare Settings.
+ dual_streams.screenshare[first_stream] = {true, false, 10};
+ dual_streams.video[first_stream] = {true, 1850, 1110, 5, 800000, 2500000,
+ 2500000, false, "VP8", 3, 2, 400000,
+ false, false, false, ""};
+ // Video settings.
+ dual_streams.video[1 - first_stream] = {
+ true, 1280,
+ 720, 30,
+ 150000, 500000,
+ 700000, false,
+ "VP8", 3,
+ 2, 400000,
+ false, false,
+ false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
+
+ // Call settings.
+ dual_streams.call.send_side_bwe = true;
+ dual_streams.call.dual_video = true;
+ std::string test_label = "dualstreams_conference_restricted_screenshare_" +
+ std::to_string(first_stream);
+ dual_streams.analyzer = {test_label, 0.0, 0.0, kTestDurationSec};
+ dual_streams.config->loss_percent = 1;
+ dual_streams.config->link_capacity_kbps = 5000;
+ dual_streams.config->queue_length_packets = 30;
+ dual_streams.config->queue_delay_ms = 100;
+
+ auto fixture = CreateVideoQualityTestFixture();
+ fixture->RunWithAnalyzer(dual_streams);
+}
+
+INSTANTIATE_TEST_SUITE_P(PCFullStackTest,
+ PCDualStreamsTest,
+ ::testing::Values(0, 1));
+*/
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/picture_id_tests.cc b/third_party/libwebrtc/video/picture_id_tests.cc
new file mode 100644
index 0000000000..06491b924a
--- /dev/null
+++ b/third_party/libwebrtc/video/picture_id_tests.cc
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/test/simulated_network.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/call_test.h"
+
+namespace webrtc {
+namespace {
+const int kFrameMaxWidth = 1280;
+const int kFrameMaxHeight = 720;
+const int kFrameRate = 30;
+const int kMaxSecondsLost = 5;
+const int kMaxFramesLost = kFrameRate * kMaxSecondsLost;
+const int kMinPacketsToObserve = 10;
+const int kEncoderBitrateBps = 300000;
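+// VP8 and VP9 carry a 15-bit picture id, so the sequence wraps at 2^15.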
+const uint32_t kPictureIdWraparound = (1 << 15);
+const size_t kNumTemporalLayers[] = {1, 2, 3};
+
+} // namespace
+
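+// Observes sent RTP packets and verifies, per SSRC, that the picture id and
+// tl0_pic_idx sequences stay continuous and monotonically increasing.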
+class PictureIdObserver : public test::RtpRtcpObserver {
+ public:
+ explicit PictureIdObserver(VideoCodecType codec_type)
+ : test::RtpRtcpObserver(test::CallTest::kDefaultTimeout),
+ depacketizer_(CreateVideoRtpDepacketizer(codec_type)),
+ max_expected_picture_id_gap_(0),
+ max_expected_tl0_idx_gap_(0),
+ num_ssrcs_to_observe_(1) {}
+
+ void SetExpectedSsrcs(size_t num_expected_ssrcs) {
+ MutexLock lock(&mutex_);
+ num_ssrcs_to_observe_ = num_expected_ssrcs;
+ }
+
+ void ResetObservedSsrcs() {
+ MutexLock lock(&mutex_);
+    // Do not clear the timestamp and picture_id, so that consistency between
+    // reinits and recreations is still checked.
+ num_packets_sent_.clear();
+ observed_ssrcs_.clear();
+ }
+
+ void SetMaxExpectedPictureIdGap(int max_expected_picture_id_gap) {
+ MutexLock lock(&mutex_);
+ max_expected_picture_id_gap_ = max_expected_picture_id_gap;
+    // Expect a smaller gap for `tl0_pic_idx` (the running index for
+    // temporal_idx 0).
+ max_expected_tl0_idx_gap_ = max_expected_picture_id_gap_ / 2;
+ }
+
+ private:
+ struct ParsedPacket {
+ uint32_t timestamp;
+ uint32_t ssrc;
+ int16_t picture_id;
+ int16_t tl0_pic_idx;
+ uint8_t temporal_idx;
+ VideoFrameType frame_type;
+ };
+
+ bool ParsePayload(const uint8_t* packet,
+ size_t length,
+ ParsedPacket* parsed) const {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ EXPECT_TRUE(rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[0] ||
+ rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[1] ||
+ rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[2])
+ << "Unknown SSRC sent.";
+
+ if (rtp_packet.payload_size() == 0) {
+ return false; // Padding packet.
+ }
+
+ parsed->timestamp = rtp_packet.Timestamp();
+ parsed->ssrc = rtp_packet.Ssrc();
+
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
+ depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ EXPECT_TRUE(parsed_payload);
+
+ if (const auto* vp8_header = absl::get_if<RTPVideoHeaderVP8>(
+ &parsed_payload->video_header.video_type_header)) {
+ parsed->picture_id = vp8_header->pictureId;
+ parsed->tl0_pic_idx = vp8_header->tl0PicIdx;
+ parsed->temporal_idx = vp8_header->temporalIdx;
+ } else if (const auto* vp9_header = absl::get_if<RTPVideoHeaderVP9>(
+ &parsed_payload->video_header.video_type_header)) {
+ parsed->picture_id = vp9_header->picture_id;
+ parsed->tl0_pic_idx = vp9_header->tl0_pic_idx;
+ parsed->temporal_idx = vp9_header->temporal_idx;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+
+ parsed->frame_type = parsed_payload->video_header.frame_type;
+ return true;
+ }
+
+ // Verify continuity and monotonicity of picture_id sequence.
+ void VerifyPictureId(const ParsedPacket& current,
+ const ParsedPacket& last) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_) {
+ if (current.timestamp == last.timestamp) {
+ EXPECT_EQ(last.picture_id, current.picture_id);
+ return; // Same frame.
+ }
+
+ // Packet belongs to a new frame.
+ // Picture id should be increasing.
+ EXPECT_TRUE((AheadOf<uint16_t, kPictureIdWraparound>(current.picture_id,
+ last.picture_id)));
+
+ // Expect continuously increasing picture id.
+ int diff = ForwardDiff<uint16_t, kPictureIdWraparound>(last.picture_id,
+ current.picture_id);
+ EXPECT_LE(diff - 1, max_expected_picture_id_gap_);
+ if (diff > 2) {
+      // If the VideoSendStream is destroyed, any frames still in the queue
+      // are lost. This can result in a two-frame gap, which will produce logs
+      // like "packet transmission failed, no matching RTP module found, or
+      // transmission error".
+      // A larger gap is only possible for the first frame after a recreation,
+      // i.e. a key frame.
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, current.frame_type);
+ }
+ }
+
+ void VerifyTl0Idx(const ParsedPacket& current, const ParsedPacket& last) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_) {
+ if (current.tl0_pic_idx == kNoTl0PicIdx ||
+ current.temporal_idx == kNoTemporalIdx) {
+ return; // No temporal layers.
+ }
+
+ if (current.timestamp == last.timestamp || current.temporal_idx != 0) {
+ EXPECT_EQ(last.tl0_pic_idx, current.tl0_pic_idx);
+ return;
+ }
+
+ // New frame with `temporal_idx` 0.
+ // `tl0_pic_idx` should be increasing.
+ EXPECT_TRUE(AheadOf<uint8_t>(current.tl0_pic_idx, last.tl0_pic_idx));
+
+ // Expect continuously increasing idx.
+ int diff = ForwardDiff<uint8_t>(last.tl0_pic_idx, current.tl0_pic_idx);
+ if (diff > 1) {
+      // If the VideoSendStream is destroyed, any frames still in the queue
+      // are lost. Gaps are only possible for the first frame after a
+      // recreation, i.e. a key frame.
+ EXPECT_EQ(VideoFrameType::kVideoFrameKey, current.frame_type);
+ EXPECT_LE(diff - 1, max_expected_tl0_idx_gap_);
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+
+ ParsedPacket parsed;
+ if (!ParsePayload(packet, length, &parsed))
+ return SEND_PACKET;
+
+ uint32_t ssrc = parsed.ssrc;
+ if (last_observed_packet_.find(ssrc) != last_observed_packet_.end()) {
+ // Compare to last packet.
+ VerifyPictureId(parsed, last_observed_packet_[ssrc]);
+ VerifyTl0Idx(parsed, last_observed_packet_[ssrc]);
+ }
+
+ last_observed_packet_[ssrc] = parsed;
+
+ // Pass the test when enough media packets have been received on all
+ // streams.
+ if (++num_packets_sent_[ssrc] >= kMinPacketsToObserve &&
+ observed_ssrcs_.find(ssrc) == observed_ssrcs_.end()) {
+ observed_ssrcs_.insert(ssrc);
+ if (observed_ssrcs_.size() == num_ssrcs_to_observe_) {
+ observation_complete_.Set();
+ }
+ }
+ return SEND_PACKET;
+ }
+
+ Mutex mutex_;
+ const std::unique_ptr<VideoRtpDepacketizer> depacketizer_;
+ std::map<uint32_t, ParsedPacket> last_observed_packet_ RTC_GUARDED_BY(mutex_);
+ std::map<uint32_t, size_t> num_packets_sent_ RTC_GUARDED_BY(mutex_);
+ int max_expected_picture_id_gap_ RTC_GUARDED_BY(mutex_);
+ int max_expected_tl0_idx_gap_ RTC_GUARDED_BY(mutex_);
+ size_t num_ssrcs_to_observe_ RTC_GUARDED_BY(mutex_);
+ std::set<uint32_t> observed_ssrcs_ RTC_GUARDED_BY(mutex_);
+};
+
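+// Verifies picture id behavior across encoder reconfigurations and stream
+// recreations; parameterized on the number of temporal layers.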
+class PictureIdTest : public test::CallTest,
+ public ::testing::WithParamInterface<size_t> {
+ public:
+ PictureIdTest() : num_temporal_layers_(GetParam()) {}
+
+ virtual ~PictureIdTest() {
+ SendTask(task_queue(), [this]() {
+ send_transport_.reset();
+ receive_transport_.reset();
+ DestroyCalls();
+ });
+ }
+
+ void SetupEncoder(VideoEncoderFactory* encoder_factory,
+ const std::string& payload_name);
+ void SetVideoEncoderConfig(int num_streams);
+ void TestPictureIdContinuousAfterReconfigure(
+ const std::vector<int>& ssrc_counts);
+ void TestPictureIdIncreaseAfterRecreateStreams(
+ const std::vector<int>& ssrc_counts);
+
+ private:
+ const size_t num_temporal_layers_;
+ std::unique_ptr<PictureIdObserver> observer_;
+};
+
+// TODO(bugs.webrtc.org/13725): Enable on Android when the flakiness is fixed.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_TemporalLayers DISABLED_TemporalLayers
+#else
+#define MAYBE_TemporalLayers TemporalLayers
+#endif
+
+INSTANTIATE_TEST_SUITE_P(MAYBE_TemporalLayers,
+ PictureIdTest,
+ ::testing::ValuesIn(kNumTemporalLayers));
+
+void PictureIdTest::SetupEncoder(VideoEncoderFactory* encoder_factory,
+ const std::string& payload_name) {
+ observer_.reset(
+ new PictureIdObserver(PayloadStringToCodecType(payload_name)));
+
+ SendTask(
+ task_queue(), [this, encoder_factory, payload_name]() {
+ CreateCalls();
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(), observer_.get());
+ CreateSendConfig(kNumSimulcastStreams, 0, 0, send_transport_.get());
+ GetVideoSendConfig()->encoder_settings.encoder_factory =
+ encoder_factory;
+ GetVideoSendConfig()->rtp.payload_name = payload_name;
+ GetVideoEncoderConfig()->codec_type =
+ PayloadStringToCodecType(payload_name);
+ SetVideoEncoderConfig(/* number_of_streams */ 1);
+ });
+}
+
+void PictureIdTest::SetVideoEncoderConfig(int num_streams) {
+ GetVideoEncoderConfig()->number_of_streams = num_streams;
+ GetVideoEncoderConfig()->max_bitrate_bps = kEncoderBitrateBps;
+
+ // Always divide the same total bitrate across all streams so that sending a
+ // single stream avoids lowering the bitrate estimate and requiring a
+ // subsequent rampup.
+ const int encoder_stream_bps = kEncoderBitrateBps / num_streams;
+ double scale_factor = 1.0;
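+  // Start from the highest-indexed layer at full resolution; each lower layer
+  // is downscaled by an additional factor of two.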
+ for (int i = num_streams - 1; i >= 0; --i) {
+ VideoStream& stream = GetVideoEncoderConfig()->simulcast_layers[i];
+ // Reduce the min bitrate by 10% to account for overhead that might
+ // otherwise cause streams to not be enabled.
+ stream.min_bitrate_bps = static_cast<int>(encoder_stream_bps * 0.9);
+ stream.target_bitrate_bps = encoder_stream_bps;
+ stream.max_bitrate_bps = encoder_stream_bps;
+ stream.num_temporal_layers = num_temporal_layers_;
+ stream.scale_resolution_down_by = scale_factor;
+ scale_factor *= 2.0;
+ }
+}
+
+void PictureIdTest::TestPictureIdContinuousAfterReconfigure(
+ const std::vector<int>& ssrc_counts) {
+ SendTask(task_queue(), [this]() {
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+
+ // Initial test with a single stream.
+ Start();
+ });
+
+ EXPECT_TRUE(observer_->Wait()) << "Timed out waiting for packets.";
+
+  // Reconfigure the VideoEncoder and test that the picture id increases.
+  // Expect a continuously increasing picture id, i.e. no gaps.
+ observer_->SetMaxExpectedPictureIdGap(0);
+ for (int ssrc_count : ssrc_counts) {
+ SetVideoEncoderConfig(ssrc_count);
+ observer_->SetExpectedSsrcs(ssrc_count);
+ observer_->ResetObservedSsrcs();
+ // Make sure the picture_id sequence is continuous on reinit and recreate.
+ SendTask(task_queue(), [this]() {
+ GetVideoSendStream()->ReconfigureVideoEncoder(
+ GetVideoEncoderConfig()->Copy());
+ });
+ EXPECT_TRUE(observer_->Wait()) << "Timed out waiting for packets.";
+ }
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ });
+}
+
+void PictureIdTest::TestPictureIdIncreaseAfterRecreateStreams(
+ const std::vector<int>& ssrc_counts) {
+ SendTask(task_queue(), [this]() {
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+
+ // Initial test with a single stream.
+ Start();
+ });
+
+ EXPECT_TRUE(observer_->Wait()) << "Timed out waiting for packets.";
+
+  // Recreate the VideoSendStream and test that the picture id increases.
+  // When the VideoSendStream is destroyed, any frames still in the queue are
+  // lost with it, so some frames are expected to be lost.
+ observer_->SetMaxExpectedPictureIdGap(kMaxFramesLost);
+ for (int ssrc_count : ssrc_counts) {
+ SendTask(task_queue(), [this, &ssrc_count]() {
+ DestroyVideoSendStreams();
+
+ SetVideoEncoderConfig(ssrc_count);
+ observer_->SetExpectedSsrcs(ssrc_count);
+ observer_->ResetObservedSsrcs();
+
+ CreateVideoSendStreams();
+ GetVideoSendStream()->Start();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+ });
+
+ EXPECT_TRUE(observer_->Wait()) << "Timed out waiting for packets.";
+ }
+
+ SendTask(task_queue(), [this]() {
+ Stop();
+ DestroyStreams();
+ });
+}
+
+TEST_P(PictureIdTest, ContinuousAfterReconfigureVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ SetupEncoder(&encoder_factory, "VP8");
+ TestPictureIdContinuousAfterReconfigure({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest, IncreasingAfterRecreateStreamVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ SetupEncoder(&encoder_factory, "VP8");
+ TestPictureIdIncreaseAfterRecreateStreams({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest, ContinuousAfterStreamCountChangeVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ // Make sure that the picture id is not reset if the stream count goes
+ // down and then up.
+ SetupEncoder(&encoder_factory, "VP8");
+ TestPictureIdContinuousAfterReconfigure({3, 1, 3});
+}
+
+TEST_P(PictureIdTest, ContinuousAfterReconfigureSimulcastEncoderAdapter) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+ SetupEncoder(&encoder_factory, "VP8");
+ TestPictureIdContinuousAfterReconfigure({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest, IncreasingAfterRecreateStreamSimulcastEncoderAdapter) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+ SetupEncoder(&encoder_factory, "VP8");
+ TestPictureIdIncreaseAfterRecreateStreams({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest, ContinuousAfterStreamCountChangeSimulcastEncoderAdapter) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+ // Make sure that the picture id is not reset if the stream count goes
+ // down and then up.
+ SetupEncoder(&encoder_factory, "VP8");
+ TestPictureIdContinuousAfterReconfigure({3, 1, 3});
+}
+
+TEST_P(PictureIdTest, IncreasingAfterRecreateStreamVp9) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ SetupEncoder(&encoder_factory, "VP9");
+ TestPictureIdIncreaseAfterRecreateStreams({1, 1});
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/quality_limitation_reason_tracker.cc b/third_party/libwebrtc/video/quality_limitation_reason_tracker.cc
new file mode 100644
index 0000000000..c2b2cc4043
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_limitation_reason_tracker.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/quality_limitation_reason_tracker.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+QualityLimitationReasonTracker::QualityLimitationReasonTracker(Clock* clock)
+ : clock_(clock),
+ current_reason_(QualityLimitationReason::kNone),
+ current_reason_updated_timestamp_ms_(clock_->TimeInMilliseconds()),
+ durations_ms_({std::make_pair(QualityLimitationReason::kNone, 0),
+ std::make_pair(QualityLimitationReason::kCpu, 0),
+ std::make_pair(QualityLimitationReason::kBandwidth, 0),
+ std::make_pair(QualityLimitationReason::kOther, 0)}) {}
+
+QualityLimitationReason QualityLimitationReasonTracker::current_reason() const {
+ return current_reason_;
+}
+
+void QualityLimitationReasonTracker::SetReason(QualityLimitationReason reason) {
+ if (reason == current_reason_)
+ return;
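+  // Attribute the time elapsed since the last update to the outgoing reason
+  // before switching to the new one.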
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ durations_ms_[current_reason_] +=
+ now_ms - current_reason_updated_timestamp_ms_;
+ current_reason_ = reason;
+ current_reason_updated_timestamp_ms_ = now_ms;
+}
+
+std::map<QualityLimitationReason, int64_t>
+QualityLimitationReasonTracker::DurationsMs() const {
+ std::map<QualityLimitationReason, int64_t> total_durations_ms = durations_ms_;
+ auto it = total_durations_ms.find(current_reason_);
+ RTC_DCHECK(it != total_durations_ms.end());
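+  // Include the time spent in the current reason since its last update, so
+  // the returned snapshot is up to date without mutating `durations_ms_`.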
+ it->second +=
+ clock_->TimeInMilliseconds() - current_reason_updated_timestamp_ms_;
+ return total_durations_ms;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/quality_limitation_reason_tracker.h b/third_party/libwebrtc/video/quality_limitation_reason_tracker.h
new file mode 100644
index 0000000000..15bc90773a
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_limitation_reason_tracker.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_QUALITY_LIMITATION_REASON_TRACKER_H_
+#define VIDEO_QUALITY_LIMITATION_REASON_TRACKER_H_
+
+#include <map>
+
+#include "common_video/include/quality_limitation_reason.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+// A tracker of quality limitation reasons. The quality limitation reason is the
+// primary reason for limiting resolution and/or framerate (such as CPU or
+// bandwidth limitations). The tracker keeps track of the current reason and the
+// duration of time spent in each reason. See qualityLimitationReason[1],
+// qualityLimitationDurations[2], and qualityLimitationResolutionChanges[3] in
+// the webrtc-stats spec.
+// Note that the specification defines the durations in seconds while the
+// internal data structures define them in milliseconds.
+// [1]
+// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason
+// [2]
+// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations
+// [3]
+// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
+class QualityLimitationReasonTracker {
+ public:
+ // The caller is responsible for making sure `clock` outlives the tracker.
+ explicit QualityLimitationReasonTracker(Clock* clock);
+
+ // The current reason defaults to QualityLimitationReason::kNone.
+ QualityLimitationReason current_reason() const;
+ void SetReason(QualityLimitationReason reason);
+ std::map<QualityLimitationReason, int64_t> DurationsMs() const;
+
+ private:
+ Clock* const clock_;
+ QualityLimitationReason current_reason_;
+ int64_t current_reason_updated_timestamp_ms_;
+ // The total amount of time spent in each reason at time
+  // `current_reason_updated_timestamp_ms_`. To get the total duration so
+  // far, including the time elapsed in `current_reason_` since it was last
+  // updated, see DurationsMs().
+ std::map<QualityLimitationReason, int64_t> durations_ms_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_QUALITY_LIMITATION_REASON_TRACKER_H_
diff --git a/third_party/libwebrtc/video/quality_limitation_reason_tracker_unittest.cc b/third_party/libwebrtc/video/quality_limitation_reason_tracker_unittest.cc
new file mode 100644
index 0000000000..f550c0d8e4
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_limitation_reason_tracker_unittest.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/quality_limitation_reason_tracker.h"
+
+#include "common_video/include/quality_limitation_reason.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class QualityLimitationReasonTrackerTest : public ::testing::Test {
+ public:
+ QualityLimitationReasonTrackerTest()
+ : fake_clock_(1234), tracker_(&fake_clock_) {}
+
+ protected:
+ SimulatedClock fake_clock_;
+ QualityLimitationReasonTracker tracker_;
+};
+
+TEST_F(QualityLimitationReasonTrackerTest, DefaultValues) {
+ EXPECT_EQ(QualityLimitationReason::kNone, tracker_.current_reason());
+ auto durations_ms = tracker_.DurationsMs();
+ EXPECT_EQ(4u, durations_ms.size());
+ EXPECT_EQ(0, durations_ms.find(QualityLimitationReason::kNone)->second);
+ EXPECT_EQ(0, durations_ms.find(QualityLimitationReason::kCpu)->second);
+ EXPECT_EQ(0, durations_ms.find(QualityLimitationReason::kBandwidth)->second);
+ EXPECT_EQ(0, durations_ms.find(QualityLimitationReason::kOther)->second);
+}
+
+TEST_F(QualityLimitationReasonTrackerTest, NoneDurationIncreasesByDefault) {
+ int64_t initial_duration_ms =
+ tracker_.DurationsMs()[QualityLimitationReason::kNone];
+ fake_clock_.AdvanceTimeMilliseconds(9999);
+ EXPECT_EQ(initial_duration_ms + 9999,
+ tracker_.DurationsMs()[QualityLimitationReason::kNone]);
+}
+
+TEST_F(QualityLimitationReasonTrackerTest,
+ RememberDurationAfterSwitchingReason) {
+ tracker_.SetReason(QualityLimitationReason::kCpu);
+ int64_t initial_duration_ms =
+ tracker_.DurationsMs()[QualityLimitationReason::kCpu];
+ fake_clock_.AdvanceTimeMilliseconds(50);
+ tracker_.SetReason(QualityLimitationReason::kOther);
+ fake_clock_.AdvanceTimeMilliseconds(50);
+ EXPECT_EQ(initial_duration_ms + 50,
+ tracker_.DurationsMs()[QualityLimitationReason::kCpu]);
+}
+
+class QualityLimitationReasonTrackerTestWithParamReason
+ : public QualityLimitationReasonTrackerTest,
+ public ::testing::WithParamInterface<QualityLimitationReason> {
+ public:
+ QualityLimitationReasonTrackerTestWithParamReason()
+ : reason_(GetParam()),
+ different_reason_(reason_ != QualityLimitationReason::kCpu
+ ? QualityLimitationReason::kCpu
+ : QualityLimitationReason::kOther) {}
+
+ protected:
+ QualityLimitationReason reason_;
+ QualityLimitationReason different_reason_;
+};
+
+TEST_P(QualityLimitationReasonTrackerTestWithParamReason,
+ DurationIncreasesOverTime) {
+ int64_t initial_duration_ms = tracker_.DurationsMs()[reason_];
+ tracker_.SetReason(reason_);
+ EXPECT_EQ(initial_duration_ms, tracker_.DurationsMs()[reason_]);
+ fake_clock_.AdvanceTimeMilliseconds(4321);
+ EXPECT_EQ(initial_duration_ms + 4321, tracker_.DurationsMs()[reason_]);
+}
+
+TEST_P(QualityLimitationReasonTrackerTestWithParamReason,
+ SwitchBetweenReasonsBackAndForth) {
+ int64_t initial_duration_ms = tracker_.DurationsMs()[reason_];
+ // Spend 100 ms in `different_reason_`.
+ tracker_.SetReason(different_reason_);
+ fake_clock_.AdvanceTimeMilliseconds(100);
+ EXPECT_EQ(initial_duration_ms, tracker_.DurationsMs()[reason_]);
+ // Spend 50 ms in `reason_`.
+ tracker_.SetReason(reason_);
+ fake_clock_.AdvanceTimeMilliseconds(50);
+ EXPECT_EQ(initial_duration_ms + 50, tracker_.DurationsMs()[reason_]);
+ // Spend another 1000 ms in `different_reason_`.
+ tracker_.SetReason(different_reason_);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_EQ(initial_duration_ms + 50, tracker_.DurationsMs()[reason_]);
+ // Spend another 100 ms in `reason_`.
+ tracker_.SetReason(reason_);
+ fake_clock_.AdvanceTimeMilliseconds(100);
+ EXPECT_EQ(initial_duration_ms + 150, tracker_.DurationsMs()[reason_]);
+ // Change reason one last time without advancing time.
+ tracker_.SetReason(different_reason_);
+ EXPECT_EQ(initial_duration_ms + 150, tracker_.DurationsMs()[reason_]);
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ All,
+ QualityLimitationReasonTrackerTestWithParamReason,
+ ::testing::Values(QualityLimitationReason::kNone, // "/0"
+ QualityLimitationReason::kCpu, // "/1"
+ QualityLimitationReason::kBandwidth, // "/2"
+ QualityLimitationReason::kOther)); // "/3"
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/quality_scaling_tests.cc b/third_party/libwebrtc/video/quality_scaling_tests.cc
new file mode 100644
index 0000000000..7eaf14831b
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_scaling_tests.cc
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "api/test/video/function_video_encoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+#include "test/frame_generator_capturer.h"
+#include "video/config/encoder_stream_factory.h"
+
+namespace webrtc {
+namespace {
+constexpr int kInitialWidth = 1280;
+constexpr int kInitialHeight = 720;
+constexpr int kLowStartBps = 100000;
+constexpr int kHighStartBps = 1000000;
+constexpr int kDefaultVgaMinStartBps = 500000; // From video_stream_encoder.cc
+constexpr TimeDelta kTimeout =
+ TimeDelta::Seconds(10); // Some tests are expected to time out.
+
+void SetEncoderSpecific(VideoEncoderConfig* encoder_config,
+ VideoCodecType type,
+ bool automatic_resize,
+ size_t num_spatial_layers) {
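+  // Toggle the encoder's built-in automatic resize (quality scaling) in the
+  // codec-specific settings; for VP9, also set the number of spatial layers.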
+ if (type == kVideoCodecVP8) {
+ VideoCodecVP8 vp8 = VideoEncoder::GetDefaultVp8Settings();
+ vp8.automaticResizeOn = automatic_resize;
+ encoder_config->encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp8EncoderSpecificSettings>(
+ vp8);
+ } else if (type == kVideoCodecVP9) {
+ VideoCodecVP9 vp9 = VideoEncoder::GetDefaultVp9Settings();
+ vp9.automaticResizeOn = automatic_resize;
+ vp9.numberOfSpatialLayers = num_spatial_layers;
+ encoder_config->encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9);
+ }
+}
+} // namespace
+
+class QualityScalingTest : public test::CallTest {
+ protected:
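+  // The first six values in the trial string are (low, high) QP threshold
+  // pairs, in the order VP8, VP9, H264, as the tests below exercise them;
+  // e.g. "1,1,0,0,0,0" forces kHighQp for VP8.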
+ const std::string kPrefix = "WebRTC-Video-QualityScaling/Enabled-";
+ const std::string kEnd = ",0,0,0.9995,0.9999,1/";
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kSinglecastLimits720pVp8 =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP8,
+ 1280 * 720);
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kSinglecastLimits360pVp9 =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP9,
+ 640 * 360);
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kSinglecastLimits720pVp9 =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP9,
+ 1280 * 720);
+};
+
+class ScalingObserver : public test::SendTest {
+ protected:
+ struct TestParams {
+ bool active;
+ absl::optional<ScalabilityMode> scalability_mode;
+ };
+ ScalingObserver(const std::string& payload_name,
+ const std::vector<TestParams>& test_params,
+ int start_bps,
+ bool automatic_resize,
+ bool expect_scaling)
+ : SendTest(expect_scaling ? kTimeout * 4 : kTimeout),
+ encoder_factory_(
+ [](const SdpVideoFormat& format) -> std::unique_ptr<VideoEncoder> {
+ if (format.name == "VP8")
+ return VP8Encoder::Create();
+ if (format.name == "VP9")
+ return VP9Encoder::Create();
+ if (format.name == "H264")
+ return H264Encoder::Create(cricket::VideoCodec("H264"));
+ RTC_DCHECK_NOTREACHED() << format.name;
+ return nullptr;
+ }),
+ payload_name_(payload_name),
+ test_params_(test_params),
+ start_bps_(start_bps),
+ automatic_resize_(automatic_resize),
+ expect_scaling_(expect_scaling) {}
+
+ DegradationPreference degradation_preference_ =
+ DegradationPreference::MAINTAIN_FRAMERATE;
+
+ private:
+ void ModifySenderBitrateConfig(BitrateConstraints* bitrate_config) override {
+ bitrate_config->start_bitrate_bps = start_bps_;
+ }
+
+ void ModifyVideoDegradationPreference(
+ DegradationPreference* degradation_preference) override {
+ *degradation_preference = degradation_preference_;
+ }
+
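+  // VP9 runs as a single stream with spatial layers, while other codecs get
+  // one simulcast stream per entry in `test_params_`.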
+ size_t GetNumVideoStreams() const override {
+ return (payload_name_ == "VP9") ? 1 : test_params_.size();
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ VideoEncoder::EncoderInfo encoder_info;
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType;
+ encoder_config->video_format.name = payload_name_;
+ const VideoCodecType codec_type = PayloadStringToCodecType(payload_name_);
+ encoder_config->codec_type = codec_type;
+ encoder_config->video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ payload_name_, /*max_qp=*/0, /*is_screenshare=*/false,
+ /*conference_mode=*/false, encoder_info);
+ encoder_config->max_bitrate_bps =
+ std::max(start_bps_, encoder_config->max_bitrate_bps);
+ if (payload_name_ == "VP9") {
+      // Simulcast layers indicate which spatial layers are active.
+ encoder_config->simulcast_layers.resize(test_params_.size());
+ encoder_config->simulcast_layers[0].max_bitrate_bps =
+ encoder_config->max_bitrate_bps;
+ }
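+    // Configure streams from the highest-resolution (last) entry downwards;
+    // each earlier simulcast stream is downscaled by a further factor of two.
+    // VP9 spatial layers scale internally, so the factor stays 1.0.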
+ double scale_factor = 1.0;
+ for (int i = test_params_.size() - 1; i >= 0; --i) {
+ VideoStream& stream = encoder_config->simulcast_layers[i];
+ stream.active = test_params_[i].active;
+ stream.scalability_mode = test_params_[i].scalability_mode;
+ stream.scale_resolution_down_by = scale_factor;
+ scale_factor *= (payload_name_ == "VP9") ? 1.0 : 2.0;
+ }
+ encoder_config->frame_drop_enabled = true;
+ SetEncoderSpecific(encoder_config, codec_type, automatic_resize_,
+ test_params_.size());
+ }
+
+ void PerformTest() override { EXPECT_EQ(expect_scaling_, Wait()); }
+
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ const std::string payload_name_;
+ const std::vector<TestParams> test_params_;
+ const int start_bps_;
+ const bool automatic_resize_;
+ const bool expect_scaling_;
+};
+
+class DownscalingObserver
+ : public ScalingObserver,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ DownscalingObserver(const std::string& payload_name,
+ const std::vector<TestParams>& test_params,
+ int start_bps,
+ bool automatic_resize,
+ bool expect_downscale)
+ : ScalingObserver(payload_name,
+ test_params,
+ start_bps,
+ automatic_resize,
+ expect_downscale) {}
+
+ private:
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetSinkWantsObserver(this);
+ frame_generator_capturer->ChangeResolution(kInitialWidth, kInitialHeight);
+ }
+
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ if (wants.max_pixel_count < kInitialWidth * kInitialHeight)
+ observation_complete_.Set();
+ }
+};
+
+class UpscalingObserver
+ : public ScalingObserver,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ UpscalingObserver(const std::string& payload_name,
+ const std::vector<TestParams>& test_params,
+ int start_bps,
+ bool automatic_resize,
+ bool expect_upscale)
+ : ScalingObserver(payload_name,
+ test_params,
+ start_bps,
+ automatic_resize,
+ expect_upscale) {}
+
+ void SetDegradationPreference(DegradationPreference preference) {
+ degradation_preference_ = preference;
+ }
+
+ private:
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetSinkWantsObserver(this);
+ frame_generator_capturer->ChangeResolution(kInitialWidth, kInitialHeight);
+ }
+
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ if (wants.max_pixel_count > last_wants_.max_pixel_count) {
+ if (wants.max_pixel_count == std::numeric_limits<int>::max())
+ observation_complete_.Set();
+ }
+ last_wants_ = wants;
+ }
+
+ rtc::VideoSinkWants last_wants_;
+};
+
+TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp8) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,1,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}}, kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForHighQpIfScalingOff_Vp8) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,1,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}}, kHighStartBps,
+ /*automatic_resize=*/false,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForNormalQp_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}}, kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}}, kLowStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrateAndThenUp) {
+ // qp_low:127, qp_high:127 -> kLowQp
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ kPrefix + "127,127,0,0,0,0" + kEnd +
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:230400|921600,fps:20|30,kbps:300|500/"); // should not affect
+
+ UpscalingObserver test("VP8", {{.active = true}}, kDefaultVgaMinStartBps - 1,
+ /*automatic_resize=*/true, /*expect_upscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownAndThenUpWithBalanced) {
+ // qp_low:127, qp_high:127 -> kLowQp
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_, kPrefix + "127,127,0,0,0,0" + kEnd +
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:230400|921600,fps:20|30,kbps:300|499/");
+
+ UpscalingObserver test("VP8", {{.active = true}}, kDefaultVgaMinStartBps - 1,
+ /*automatic_resize=*/true, /*expect_upscale=*/true);
+ test.SetDegradationPreference(DegradationPreference::BALANCED);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownButNotUpWithBalancedIfBitrateNotEnough) {
+ // qp_low:127, qp_high:127 -> kLowQp
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_, kPrefix + "127,127,0,0,0,0" + kEnd +
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:230400|921600,fps:20|30,kbps:300|500/");
+
+ UpscalingObserver test("VP8", {{.active = true}}, kDefaultVgaMinStartBps - 1,
+ /*automatic_resize=*/true, /*expect_upscale=*/false);
+ test.SetDegradationPreference(DegradationPreference::BALANCED);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrate_Simulcast) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}, {.active = true}},
+ kLowStartBps,
+ /*automatic_resize=*/false,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForHighQp_HighestStreamActive_Vp8) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,1,0,0,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP8", {{.active = false}, {.active = false}, {.active = true}},
+ kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ AdaptsDownForLowStartBitrate_HighestStreamActive_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP8", {{.active = false}, {.active = false}, {.active = true}},
+ kSinglecastLimits720pVp8->min_start_bitrate_bps - 1,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownButNotUpWithMinStartBitrateLimit) {
+ // qp_low:127, qp_high:127 -> kLowQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "127,127,0,0,0,0" + kEnd);
+
+ UpscalingObserver test("VP8", {{.active = false}, {.active = true}},
+ kSinglecastLimits720pVp8->min_start_bitrate_bps - 1,
+ /*automatic_resize=*/true, /*expect_upscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfBitrateEnough_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP8", {{.active = false}, {.active = false}, {.active = true}},
+ kSinglecastLimits720pVp8->min_start_bitrate_bps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ NoAdaptDownForLowStartBitrateIfDefaultLimitsDisabled_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_, kPrefix + "1,127,0,0,0,0" + kEnd +
+ "WebRTC-DefaultBitrateLimitsKillSwitch/Enabled/");
+
+ DownscalingObserver test(
+ "VP8", {{.active = false}, {.active = false}, {.active = true}},
+ kSinglecastLimits720pVp8->min_start_bitrate_bps - 1,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ NoAdaptDownForLowStartBitrate_OneStreamSinglecastLimitsNotUsed_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}},
+ kSinglecastLimits720pVp8->min_start_bitrate_bps - 1,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForHighQp_LowestStreamActive_Vp8) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,1,0,0,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP8", {{.active = true}, {.active = false}, {.active = false}},
+ kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ NoAdaptDownForLowStartBitrate_LowestStreamActive_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP8", {{.active = true}, {.active = false}, {.active = false}},
+ kLowStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfScalingOff_Vp8) {
+ // qp_low:1, qp_high:127 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "1,127,0,0,0,0" + kEnd);
+
+ DownscalingObserver test("VP8", {{.active = true}}, kLowStartBps,
+ /*automatic_resize=*/false,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp9) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,1,0,0" + kEnd);
+
+ DownscalingObserver test("VP9", {{.active = true}}, kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForHighQpIfScalingOff_Vp9) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ kPrefix + "0,0,1,1,0,0" + kEnd + "WebRTC-VP9QualityScaler/Disabled/");
+
+ DownscalingObserver test("VP9", {{.active = true}}, kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp9) {
+ // qp_low:1, qp_high:255 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,255,0,0" + kEnd);
+
+ DownscalingObserver test("VP9", {{.active = true}}, kLowStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForHighStartBitrate_Vp9) {
+ DownscalingObserver test(
+ "VP9", {{.active = false}, {.active = false}, {.active = true}},
+ kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForHighQp_LowestStreamActive_Vp9) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,1,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP9", {{.active = true}, {.active = false}, {.active = false}},
+ kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ NoAdaptDownForLowStartBitrate_LowestStreamActive_Vp9) {
+ // qp_low:1, qp_high:255 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,255,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP9", {{.active = true}, {.active = false}, {.active = false}},
+ kLowStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForHighQp_MiddleStreamActive_Vp9) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,1,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP9", {{.active = false}, {.active = true}, {.active = false}},
+ kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ AdaptsDownForLowStartBitrate_MiddleStreamActive_Vp9) {
+ // qp_low:1, qp_high:255 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,255,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP9", {{.active = false}, {.active = true}, {.active = false}},
+ kSinglecastLimits360pVp9->min_start_bitrate_bps - 1,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfBitrateEnough_Vp9) {
+ // qp_low:1, qp_high:255 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,255,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP9", {{.active = false}, {.active = true}, {.active = false}},
+ kSinglecastLimits360pVp9->min_start_bitrate_bps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ AdaptsDownButNotUpWithMinStartBitrateLimitWithScalabilityMode_VP9) {
+ // qp_low:255, qp_high:255 -> kLowQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,255,255,0,0" + kEnd);
+
+ UpscalingObserver test(
+ "VP9",
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T3},
+ {.active = false}},
+ kSinglecastLimits720pVp9->min_start_bitrate_bps - 1,
+ /*automatic_resize=*/true, /*expect_upscale=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest,
+ NoAdaptDownForLowStartBitrateIfBitrateEnoughWithScalabilityMode_Vp9) {
+ // qp_low:1, qp_high:255 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,1,255,0,0" + kEnd);
+
+ DownscalingObserver test(
+ "VP9",
+ {{.active = true, .scalability_mode = ScalabilityMode::kL1T3},
+ {.active = false},
+ {.active = false}},
+ kSinglecastLimits720pVp9->min_start_bitrate_bps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/false);
+ RunBaseTest(&test);
+}
+
+#if defined(WEBRTC_USE_H264)
+TEST_F(QualityScalingTest, AdaptsDownForHighQp_H264) {
+ // qp_low:1, qp_high:1 -> kHighQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,0,0,1,1" + kEnd);
+
+ DownscalingObserver test("H264", {{.active = true}}, kHighStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_H264) {
+ // qp_low:1, qp_high:51 -> kNormalQp
+ test::ScopedKeyValueConfig field_trials(field_trials_,
+ kPrefix + "0,0,0,0,1,51" + kEnd);
+
+ DownscalingObserver test("H264", {{.active = true}}, kLowStartBps,
+ /*automatic_resize=*/true,
+ /*expect_downscale=*/true);
+ RunBaseTest(&test);
+}
+#endif // defined(WEBRTC_USE_H264)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/quality_threshold.cc b/third_party/libwebrtc/video/quality_threshold.cc
new file mode 100644
index 0000000000..931b0b20f9
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_threshold.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/quality_threshold.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+QualityThreshold::QualityThreshold(int low_threshold,
+ int high_threshold,
+ float fraction,
+ int max_measurements)
+ : buffer_(new int[max_measurements]),
+ max_measurements_(max_measurements),
+ fraction_(fraction),
+ low_threshold_(low_threshold),
+ high_threshold_(high_threshold),
+ until_full_(max_measurements),
+ next_index_(0),
+ sum_(0),
+ count_low_(0),
+ count_high_(0),
+ num_high_states_(0),
+ num_certain_states_(0) {
+ RTC_CHECK_GT(fraction, 0.5f);
+ RTC_CHECK_GT(max_measurements, 1);
+ RTC_CHECK_LT(low_threshold, high_threshold);
+}
+
+QualityThreshold::~QualityThreshold() = default;
+
+void QualityThreshold::AddMeasurement(int measurement) {
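+  // Overwrite the oldest sample in the circular buffer; `prev_val` is the
+  // value being evicted (0 while the buffer is still filling up).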
+ int prev_val = until_full_ > 0 ? 0 : buffer_[next_index_];
+ buffer_[next_index_] = measurement;
+ next_index_ = (next_index_ + 1) % max_measurements_;
+
+ sum_ += measurement - prev_val;
+
+ if (until_full_ == 0) {
+ if (prev_val <= low_threshold_) {
+ --count_low_;
+ } else if (prev_val >= high_threshold_) {
+ --count_high_;
+ }
+ }
+
+ if (measurement <= low_threshold_) {
+ ++count_low_;
+ } else if (measurement >= high_threshold_) {
+ ++count_high_;
+ }
+
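+  // Flip the state only when a sufficient majority of the window agrees;
+  // between the two majorities the previous state is kept (hysteresis).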
+ float sufficient_majority = fraction_ * max_measurements_;
+ if (count_high_ >= sufficient_majority) {
+ is_high_ = true;
+ } else if (count_low_ >= sufficient_majority) {
+ is_high_ = false;
+ }
+
+ if (until_full_ > 0)
+ --until_full_;
+
+ if (is_high_) {
+ if (*is_high_)
+ ++num_high_states_;
+ ++num_certain_states_;
+ }
+}
+
+absl::optional<bool> QualityThreshold::IsHigh() const {
+ return is_high_;
+}
+
+absl::optional<double> QualityThreshold::CalculateVariance() const {
+ if (until_full_ > 0) {
+ return absl::nullopt;
+ }
+
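+  // Unbiased sample variance over the full window (Bessel's correction,
+  // dividing by max_measurements_ - 1).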
+ double variance = 0;
+ double mean = static_cast<double>(sum_) / max_measurements_;
+ for (int i = 0; i < max_measurements_; ++i) {
+ variance += (buffer_[i] - mean) * (buffer_[i] - mean);
+ }
+ return variance / (max_measurements_ - 1);
+}
+
+absl::optional<double> QualityThreshold::FractionHigh(
+ int min_required_samples) const {
+ RTC_DCHECK_GT(min_required_samples, 0);
+ if (num_certain_states_ < min_required_samples)
+ return absl::nullopt;
+
+ return static_cast<double>(num_high_states_) / num_certain_states_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/quality_threshold.h b/third_party/libwebrtc/video/quality_threshold.h
new file mode 100644
index 0000000000..a193aa7f01
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_threshold.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_QUALITY_THRESHOLD_H_
+#define VIDEO_QUALITY_THRESHOLD_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+class QualityThreshold {
+ public:
+ // Both thresholds are inclusive, i.e. measurement >= high signifies a high
+ // state, while measurement <= low signifies a low state.
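+  //
+  // Illustrative use (a sketch, not an API contract):
+  //   QualityThreshold thresh(/*low_threshold=*/30, /*high_threshold=*/40,
+  //                           /*fraction=*/0.8f, /*max_measurements=*/10);
+  //   thresh.AddMeasurement(sample);
+  //   if (thresh.IsHigh().value_or(false)) {
+  //     // The window is in the high state.
+  //   }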
+ QualityThreshold(int low_threshold,
+ int high_threshold,
+ float fraction,
+ int max_measurements);
+ ~QualityThreshold();
+
+ void AddMeasurement(int measurement);
+ absl::optional<bool> IsHigh() const;
+ absl::optional<double> CalculateVariance() const;
+ absl::optional<double> FractionHigh(int min_required_samples) const;
+
+ private:
+ const std::unique_ptr<int[]> buffer_;
+ const int max_measurements_;
+ const float fraction_;
+ const int low_threshold_;
+ const int high_threshold_;
+ int until_full_;
+ int next_index_;
+ absl::optional<bool> is_high_;
+ int sum_;
+ int count_low_;
+ int count_high_;
+ int num_high_states_;
+ int num_certain_states_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_QUALITY_THRESHOLD_H_
diff --git a/third_party/libwebrtc/video/quality_threshold_unittest.cc b/third_party/libwebrtc/video/quality_threshold_unittest.cc
new file mode 100644
index 0000000000..c9396d7188
--- /dev/null
+++ b/third_party/libwebrtc/video/quality_threshold_unittest.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/quality_threshold.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(QualityThresholdTest, BackAndForth) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 1;
+ const float kFraction = 0.75f;
+ const int kMaxMeasurements = 10;
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ const int kNeededMeasurements =
+ static_cast<int>(kFraction * kMaxMeasurements + 1);
+ for (int i = 0; i < kNeededMeasurements; ++i) {
+ EXPECT_FALSE(thresh.IsHigh());
+ thresh.AddMeasurement(kLowThreshold);
+ }
+ ASSERT_TRUE(thresh.IsHigh());
+ for (int i = 0; i < kNeededMeasurements; ++i) {
+ EXPECT_FALSE(*thresh.IsHigh());
+ thresh.AddMeasurement(kHighThreshold);
+ }
+ EXPECT_TRUE(*thresh.IsHigh());
+
+ for (int i = 0; i < kNeededMeasurements; ++i) {
+ EXPECT_TRUE(*thresh.IsHigh());
+ thresh.AddMeasurement(kLowThreshold);
+ }
+ EXPECT_FALSE(*thresh.IsHigh());
+}
+
+TEST(QualityThresholdTest, Variance) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 1;
+ const float kFraction = 0.8f;
+ const int kMaxMeasurements = 10;
+ const double kMaxError = 0.01;
+
+ // Previously randomly generated values...
+ int values[] = {51, 79, 80, 56, 19, 20, 48, 57, 48, 25, 2, 25, 38, 37, 25};
+ // ...with precomputed variances.
+ double variances[] = {476.9, 687.6, 552, 336.4, 278.767, 265.167};
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ for (int i = 0; i < kMaxMeasurements; ++i) {
+ EXPECT_FALSE(thresh.CalculateVariance());
+ thresh.AddMeasurement(values[i]);
+ }
+
+ ASSERT_TRUE(thresh.CalculateVariance());
+ EXPECT_NEAR(variances[0], *thresh.CalculateVariance(), kMaxError);
+ for (unsigned int i = 1; i < sizeof(variances) / sizeof(double); ++i) {
+ thresh.AddMeasurement(values[i + kMaxMeasurements - 1]);
+ EXPECT_NEAR(variances[i], *thresh.CalculateVariance(), kMaxError);
+ }
+
+ for (int i = 0; i < kMaxMeasurements; ++i) {
+ thresh.AddMeasurement(42);
+ }
+ EXPECT_NEAR(0, *thresh.CalculateVariance(), kMaxError);
+}
+
+TEST(QualityThresholdTest, BetweenThresholds) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 2;
+ const float kFraction = 0.6f;
+ const int kMaxMeasurements = 10;
+
+ const int kBetweenThresholds = (kLowThreshold + kHighThreshold) / 2;
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ for (int i = 0; i < 2 * kMaxMeasurements; ++i) {
+ EXPECT_FALSE(thresh.IsHigh());
+ thresh.AddMeasurement(kBetweenThresholds);
+ }
+ EXPECT_FALSE(thresh.IsHigh());
+}
+
+TEST(QualityThresholdTest, FractionHigh) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 2;
+ const float kFraction = 0.75f;
+ const int kMaxMeasurements = 10;
+
+ const int kBetweenThresholds = (kLowThreshold + kHighThreshold) / 2;
+ const int kNeededMeasurements =
+ static_cast<int>(kFraction * kMaxMeasurements + 1);
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ for (int i = 0; i < kMaxMeasurements; ++i) {
+ EXPECT_FALSE(thresh.FractionHigh(1));
+ thresh.AddMeasurement(kBetweenThresholds);
+ }
+
+ for (int i = 0; i < kNeededMeasurements; i++) {
+ EXPECT_FALSE(thresh.FractionHigh(1));
+ thresh.AddMeasurement(kHighThreshold);
+ }
+ EXPECT_FALSE(thresh.FractionHigh(2));
+ ASSERT_TRUE(thresh.FractionHigh(1));
+ EXPECT_NEAR(*thresh.FractionHigh(1), 1, 0.001);
+
+ for (int i = 0; i < kNeededMeasurements; i++) {
+ EXPECT_NEAR(*thresh.FractionHigh(1), 1, 0.001);
+ thresh.AddMeasurement(kLowThreshold);
+ }
+ EXPECT_NEAR(
+ *thresh.FractionHigh(1),
+ static_cast<double>(kNeededMeasurements) / (kNeededMeasurements + 1),
+ 0.001);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/receive_statistics_proxy2.cc b/third_party/libwebrtc/video/receive_statistics_proxy2.cc
new file mode 100644
index 0000000000..508c36eaaf
--- /dev/null
+++ b/third_party/libwebrtc/video/receive_statistics_proxy2.cc
@@ -0,0 +1,1037 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/receive_statistics_proxy2.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "video/video_receive_stream2.h"
+
+namespace webrtc {
+namespace internal {
+namespace {
+// Periodic time interval for processing samples for `freq_offset_counter_`.
+const int64_t kFreqOffsetProcessIntervalMs = 40000;
+
+// Configuration for bad call detection.
+const int kBadCallMinRequiredSamples = 10;
+const int kMinSampleLengthMs = 990;
+const int kNumMeasurements = 10;
+const int kNumMeasurementsVariance = kNumMeasurements * 1.5;
+const float kBadFraction = 0.8f;
+// For fps:
+// Low means low enough to be bad, high means high enough to be good
+const int kLowFpsThreshold = 12;
+const int kHighFpsThreshold = 14;
+// For qp and fps variance:
+// Low means low enough to be good, high means high enough to be bad
+const int kLowQpThresholdVp8 = 60;
+const int kHighQpThresholdVp8 = 70;
+const int kLowVarianceThreshold = 1;
+const int kHighVarianceThreshold = 2;
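+// With kBadFraction = 0.8, a QualityThreshold flips state only once 80% of
+// its window crosses a threshold (8 of 10 samples for fps/qp, 12 of 15 for
+// variance).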
+
+// Some metrics are reported as a maximum over this period.
+// This should be synchronized with a typical getStats polling interval in
+// the clients.
+const int kMovingMaxWindowMs = 1000;
+
+// The window size used to calculate the framerate/bitrate.
+const int kRateStatisticsWindowSizeMs = 1000;
+
+// A ballpark estimate of the maximum common inter-frame delay.
+// Values below this are stored explicitly in an array; values above it go
+// into a map.
+const int kMaxCommonInterframeDelayMs = 500;
+
+const char* UmaPrefixForContentType(VideoContentType content_type) {
+ if (videocontenttypehelpers::IsScreenshare(content_type))
+ return "WebRTC.Video.Screenshare";
+ return "WebRTC.Video";
+}
+
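+// Builds suffixes such as ".S0" (simulcast id 1) or ".ExperimentGroup0"
+// (experiment id 1); the printed index is the stored id minus one.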
+std::string UmaSuffixForContentType(VideoContentType content_type) {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ int simulcast_id = videocontenttypehelpers::GetSimulcastId(content_type);
+ if (simulcast_id > 0) {
+ ss << ".S" << simulcast_id - 1;
+ }
+ int experiment_id = videocontenttypehelpers::GetExperimentId(content_type);
+ if (experiment_id > 0) {
+ ss << ".ExperimentGroup" << experiment_id - 1;
+ }
+ return ss.str();
+}
+
+// TODO(https://bugs.webrtc.org/11572): Workaround for an issue with some
+// rtc::Thread instances and/or implementations that don't register as the
+// current task queue.
+bool IsCurrentTaskQueueOrThread(TaskQueueBase* task_queue) {
+ if (task_queue->IsCurrent())
+ return true;
+
+ rtc::Thread* current_thread = rtc::ThreadManager::Instance()->CurrentThread();
+ if (!current_thread)
+ return false;
+
+ return static_cast<TaskQueueBase*>(current_thread) == task_queue;
+}
+
+} // namespace
+
+ReceiveStatisticsProxy::ReceiveStatisticsProxy(uint32_t remote_ssrc,
+ Clock* clock,
+ TaskQueueBase* worker_thread)
+ : clock_(clock),
+ start_ms_(clock->TimeInMilliseconds()),
+ last_sample_time_(clock->TimeInMilliseconds()),
+ fps_threshold_(kLowFpsThreshold,
+ kHighFpsThreshold,
+ kBadFraction,
+ kNumMeasurements),
+ qp_threshold_(kLowQpThresholdVp8,
+ kHighQpThresholdVp8,
+ kBadFraction,
+ kNumMeasurements),
+ variance_threshold_(kLowVarianceThreshold,
+ kHighVarianceThreshold,
+ kBadFraction,
+ kNumMeasurementsVariance),
+ num_bad_states_(0),
+ num_certain_states_(0),
+ remote_ssrc_(remote_ssrc),
+ // 1000ms window, scale 1000 for ms to s.
+ decode_fps_estimator_(1000, 1000),
+ renders_fps_estimator_(1000, 1000),
+ render_fps_tracker_(100, 10u),
+ render_pixel_tracker_(100, 10u),
+ video_quality_observer_(new VideoQualityObserver()),
+ interframe_delay_max_moving_(kMovingMaxWindowMs),
+ freq_offset_counter_(clock, nullptr, kFreqOffsetProcessIntervalMs),
+ last_content_type_(VideoContentType::UNSPECIFIED),
+ last_codec_type_(kVideoCodecVP8),
+ num_delayed_frames_rendered_(0),
+ sum_missed_render_deadline_ms_(0),
+ timing_frame_info_counter_(kMovingMaxWindowMs),
+ worker_thread_(worker_thread) {
+ RTC_DCHECK(worker_thread);
+ decode_queue_.Detach();
+ incoming_render_queue_.Detach();
+ stats_.ssrc = remote_ssrc_;
+}
+
+ReceiveStatisticsProxy::~ReceiveStatisticsProxy() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+}
+
+void ReceiveStatisticsProxy::UpdateHistograms(
+ absl::optional<int> fraction_lost,
+ const StreamDataCounters& rtp_stats,
+ const StreamDataCounters* rtx_stats) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ char log_stream_buf[8 * 1024];
+ rtc::SimpleStringBuilder log_stream(log_stream_buf);
+
+ int stream_duration_sec = (clock_->TimeInMilliseconds() - start_ms_) / 1000;
+
+ if (stats_.frame_counts.key_frames > 0 ||
+ stats_.frame_counts.delta_frames > 0) {
+ RTC_HISTOGRAM_COUNTS_100000("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
+ stream_duration_sec);
+ log_stream << "WebRTC.Video.ReceiveStreamLifetimeInSeconds "
+ << stream_duration_sec << '\n';
+ }
+
+ log_stream << "Frames decoded " << stats_.frames_decoded << '\n';
+
+ if (num_unique_frames_) {
+ int num_dropped_frames = *num_unique_frames_ - stats_.frames_decoded;
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DroppedFrames.Receiver",
+ num_dropped_frames);
+ log_stream << "WebRTC.Video.DroppedFrames.Receiver " << num_dropped_frames
+ << '\n';
+ }
+
+ if (fraction_lost && stream_duration_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.ReceivedPacketsLostInPercent",
+ *fraction_lost);
+ log_stream << "WebRTC.Video.ReceivedPacketsLostInPercent " << *fraction_lost
+ << '\n';
+ }
+
+ if (first_decoded_frame_time_ms_) {
+ const int64_t elapsed_ms =
+ (clock_->TimeInMilliseconds() - *first_decoded_frame_time_ms_);
+ if (elapsed_ms >=
+ metrics::kMinRunTimeInSeconds * rtc::kNumMillisecsPerSec) {
+ int decoded_fps = static_cast<int>(
+ (stats_.frames_decoded * 1000.0f / elapsed_ms) + 0.5f);
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.DecodedFramesPerSecond",
+ decoded_fps);
+ log_stream << "WebRTC.Video.DecodedFramesPerSecond " << decoded_fps
+ << '\n';
+
+ const uint32_t frames_rendered = stats_.frames_rendered;
+ if (frames_rendered > 0) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DelayedFramesToRenderer",
+ static_cast<int>(num_delayed_frames_rendered_ *
+ 100 / frames_rendered));
+ if (num_delayed_frames_rendered_ > 0) {
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
+ static_cast<int>(sum_missed_render_deadline_ms_ /
+ num_delayed_frames_rendered_));
+ }
+ }
+ }
+ }
+
+ const int kMinRequiredSamples = 200;
+ int samples = static_cast<int>(render_fps_tracker_.TotalSampleCount());
+ if (samples >= kMinRequiredSamples) {
+ int rendered_fps = round(render_fps_tracker_.ComputeTotalRate());
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.RenderFramesPerSecond",
+ rendered_fps);
+ log_stream << "WebRTC.Video.RenderFramesPerSecond " << rendered_fps << '\n';
+ RTC_HISTOGRAM_COUNTS_100000(
+ "WebRTC.Video.RenderSqrtPixelsPerSecond",
+ round(render_pixel_tracker_.ComputeTotalRate()));
+ }
+
+ absl::optional<int> sync_offset_ms =
+ sync_offset_counter_.Avg(kMinRequiredSamples);
+ if (sync_offset_ms) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.AVSyncOffsetInMs",
+ *sync_offset_ms);
+ log_stream << "WebRTC.Video.AVSyncOffsetInMs " << *sync_offset_ms << '\n';
+ }
+ AggregatedStats freq_offset_stats = freq_offset_counter_.GetStats();
+ if (freq_offset_stats.num_samples > 0) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.RtpToNtpFreqOffsetInKhz",
+ freq_offset_stats.average);
+ log_stream << "WebRTC.Video.RtpToNtpFreqOffsetInKhz "
+ << freq_offset_stats.ToString() << '\n';
+ }
+
+ int num_total_frames =
+ stats_.frame_counts.key_frames + stats_.frame_counts.delta_frames;
+ if (num_total_frames >= kMinRequiredSamples) {
+ int num_key_frames = stats_.frame_counts.key_frames;
+ int key_frames_permille =
+ (num_key_frames * 1000 + num_total_frames / 2) / num_total_frames;
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.KeyFramesReceivedInPermille",
+ key_frames_permille);
+ log_stream << "WebRTC.Video.KeyFramesReceivedInPermille "
+ << key_frames_permille << '\n';
+ }
+
+ absl::optional<int> qp = qp_counters_.vp8.Avg(kMinRequiredSamples);
+ if (qp) {
+ RTC_HISTOGRAM_COUNTS_200("WebRTC.Video.Decoded.Vp8.Qp", *qp);
+ log_stream << "WebRTC.Video.Decoded.Vp8.Qp " << *qp << '\n';
+ }
+
+ absl::optional<int> decode_ms = decode_time_counter_.Avg(kMinRequiredSamples);
+ if (decode_ms) {
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DecodeTimeInMs", *decode_ms);
+ log_stream << "WebRTC.Video.DecodeTimeInMs " << *decode_ms << '\n';
+ }
+ absl::optional<int> jb_delay_ms =
+ jitter_buffer_delay_counter_.Avg(kMinRequiredSamples);
+ if (jb_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.JitterBufferDelayInMs",
+ *jb_delay_ms);
+ log_stream << "WebRTC.Video.JitterBufferDelayInMs " << *jb_delay_ms << '\n';
+ }
+
+ absl::optional<int> target_delay_ms =
+ target_delay_counter_.Avg(kMinRequiredSamples);
+ if (target_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.TargetDelayInMs",
+ *target_delay_ms);
+ log_stream << "WebRTC.Video.TargetDelayInMs " << *target_delay_ms << '\n';
+ }
+ absl::optional<int> current_delay_ms =
+ current_delay_counter_.Avg(kMinRequiredSamples);
+ if (current_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.CurrentDelayInMs",
+ *current_delay_ms);
+ log_stream << "WebRTC.Video.CurrentDelayInMs " << *current_delay_ms << '\n';
+ }
+ absl::optional<int> delay_ms = delay_counter_.Avg(kMinRequiredSamples);
+ if (delay_ms)
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.OnewayDelayInMs", *delay_ms);
+
+  // Aggregate content_specific_stats_ by removing experiment or simulcast
+  // information.
+ std::map<VideoContentType, ContentSpecificStats> aggregated_stats;
+ for (const auto& it : content_specific_stats_) {
+ // Calculate simulcast specific metrics (".S0" ... ".S2" suffixes).
+ VideoContentType content_type = it.first;
+ if (videocontenttypehelpers::GetSimulcastId(content_type) > 0) {
+ // Aggregate on experiment id.
+ videocontenttypehelpers::SetExperimentId(&content_type, 0);
+ aggregated_stats[content_type].Add(it.second);
+ }
+ // Calculate experiment specific metrics (".ExperimentGroup[0-7]" suffixes).
+ content_type = it.first;
+ if (videocontenttypehelpers::GetExperimentId(content_type) > 0) {
+ // Aggregate on simulcast id.
+ videocontenttypehelpers::SetSimulcastId(&content_type, 0);
+ aggregated_stats[content_type].Add(it.second);
+ }
+ // Calculate aggregated metrics (no suffixes. Aggregated on everything).
+ content_type = it.first;
+ videocontenttypehelpers::SetSimulcastId(&content_type, 0);
+ videocontenttypehelpers::SetExperimentId(&content_type, 0);
+ aggregated_stats[content_type].Add(it.second);
+ }
+
+ for (const auto& it : aggregated_stats) {
+ // For the metric Foo we report the following slices:
+ // WebRTC.Video.Foo,
+ // WebRTC.Video.Screenshare.Foo,
+ // WebRTC.Video.Foo.S[0-3],
+ // WebRTC.Video.Foo.ExperimentGroup[0-7],
+ // WebRTC.Video.Screenshare.Foo.S[0-3],
+ // WebRTC.Video.Screenshare.Foo.ExperimentGroup[0-7].
+ auto content_type = it.first;
+ auto stats = it.second;
+ std::string uma_prefix = UmaPrefixForContentType(content_type);
+ std::string uma_suffix = UmaSuffixForContentType(content_type);
+ // Metrics can be sliced on either simulcast id or experiment id but not
+ // both.
+ RTC_DCHECK(videocontenttypehelpers::GetExperimentId(content_type) == 0 ||
+ videocontenttypehelpers::GetSimulcastId(content_type) == 0);
+
+ absl::optional<int> e2e_delay_ms =
+ stats.e2e_delay_counter.Avg(kMinRequiredSamples);
+ if (e2e_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".EndToEndDelayInMs" + uma_suffix, *e2e_delay_ms);
+ log_stream << uma_prefix << ".EndToEndDelayInMs" << uma_suffix << " "
+ << *e2e_delay_ms << '\n';
+ }
+ absl::optional<int> e2e_delay_max_ms = stats.e2e_delay_counter.Max();
+ if (e2e_delay_max_ms && e2e_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+ uma_prefix + ".EndToEndDelayMaxInMs" + uma_suffix, *e2e_delay_max_ms);
+ log_stream << uma_prefix << ".EndToEndDelayMaxInMs" << uma_suffix << " "
+ << *e2e_delay_max_ms << '\n';
+ }
+ absl::optional<int> interframe_delay_ms =
+ stats.interframe_delay_counter.Avg(kMinRequiredSamples);
+ if (interframe_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".InterframeDelayInMs" + uma_suffix,
+ *interframe_delay_ms);
+ log_stream << uma_prefix << ".InterframeDelayInMs" << uma_suffix << " "
+ << *interframe_delay_ms << '\n';
+ }
+ absl::optional<int> interframe_delay_max_ms =
+ stats.interframe_delay_counter.Max();
+ if (interframe_delay_max_ms && interframe_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".InterframeDelayMaxInMs" + uma_suffix,
+ *interframe_delay_max_ms);
+ log_stream << uma_prefix << ".InterframeDelayMaxInMs" << uma_suffix << " "
+ << *interframe_delay_max_ms << '\n';
+ }
+
+ absl::optional<uint32_t> interframe_delay_95p_ms =
+ stats.interframe_delay_percentiles.GetPercentile(0.95f);
+    if (interframe_delay_95p_ms && interframe_delay_ms) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".InterframeDelay95PercentileInMs" + uma_suffix,
+ *interframe_delay_95p_ms);
+ log_stream << uma_prefix << ".InterframeDelay95PercentileInMs"
+ << uma_suffix << " " << *interframe_delay_95p_ms << '\n';
+ }
+
+ absl::optional<int> width = stats.received_width.Avg(kMinRequiredSamples);
+ if (width) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".ReceivedWidthInPixels" + uma_suffix, *width);
+ log_stream << uma_prefix << ".ReceivedWidthInPixels" << uma_suffix << " "
+ << *width << '\n';
+ }
+
+ absl::optional<int> height = stats.received_height.Avg(kMinRequiredSamples);
+ if (height) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".ReceivedHeightInPixels" + uma_suffix, *height);
+ log_stream << uma_prefix << ".ReceivedHeightInPixels" << uma_suffix << " "
+ << *height << '\n';
+ }
+
+ if (content_type != VideoContentType::UNSPECIFIED) {
+ // Don't report these 3 metrics unsliced, as more precise variants
+ // are reported separately in this method.
+ float flow_duration_sec = stats.flow_duration_ms / 1000.0;
+ if (flow_duration_sec >= metrics::kMinRunTimeInSeconds) {
+ int media_bitrate_kbps = static_cast<int>(stats.total_media_bytes * 8 /
+ flow_duration_sec / 1000);
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".MediaBitrateReceivedInKbps" + uma_suffix,
+ media_bitrate_kbps);
+ log_stream << uma_prefix << ".MediaBitrateReceivedInKbps" << uma_suffix
+ << " " << media_bitrate_kbps << '\n';
+ }
+
+ int num_total_frames =
+ stats.frame_counts.key_frames + stats.frame_counts.delta_frames;
+ if (num_total_frames >= kMinRequiredSamples) {
+ int num_key_frames = stats.frame_counts.key_frames;
+ int key_frames_permille =
+ (num_key_frames * 1000 + num_total_frames / 2) / num_total_frames;
+ RTC_HISTOGRAM_COUNTS_SPARSE_1000(
+ uma_prefix + ".KeyFramesReceivedInPermille" + uma_suffix,
+ key_frames_permille);
+ log_stream << uma_prefix << ".KeyFramesReceivedInPermille" << uma_suffix
+ << " " << key_frames_permille << '\n';
+ }
+
+ absl::optional<int> qp = stats.qp_counter.Avg(kMinRequiredSamples);
+ if (qp) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_200(
+ uma_prefix + ".Decoded.Vp8.Qp" + uma_suffix, *qp);
+ log_stream << uma_prefix << ".Decoded.Vp8.Qp" << uma_suffix << " "
+ << *qp << '\n';
+ }
+ }
+ }
+
+ StreamDataCounters rtp_rtx_stats = rtp_stats;
+ if (rtx_stats)
+ rtp_rtx_stats.Add(*rtx_stats);
+
+ int64_t elapsed_sec =
+ rtp_rtx_stats.TimeSinceFirstPacketInMs(clock_->TimeInMilliseconds()) /
+ 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.BitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx_stats.transmitted.TotalBytes() * 8 /
+ elapsed_sec / 1000));
+ int media_bitrate_kbs = static_cast<int>(rtp_stats.MediaPayloadBytes() * 8 /
+ elapsed_sec / 1000);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.MediaBitrateReceivedInKbps",
+ media_bitrate_kbs);
+ log_stream << "WebRTC.Video.MediaBitrateReceivedInKbps "
+ << media_bitrate_kbs << '\n';
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.PaddingBitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx_stats.transmitted.padding_bytes * 8 /
+ elapsed_sec / 1000));
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.RetransmittedBitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx_stats.retransmitted.TotalBytes() * 8 /
+ elapsed_sec / 1000));
+ if (rtx_stats) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.RtxBitrateReceivedInKbps",
+ static_cast<int>(rtx_stats->transmitted.TotalBytes() * 8 /
+ elapsed_sec / 1000));
+ }
+ const RtcpPacketTypeCounter& counters = stats_.rtcp_packet_type_counts;
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.NackPacketsSentPerMinute",
+ counters.nack_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.FirPacketsSentPerMinute",
+ counters.fir_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.PliPacketsSentPerMinute",
+ counters.pli_packets * 60 / elapsed_sec);
+ if (counters.nack_requests > 0) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.UniqueNackRequestsSentInPercent",
+ counters.UniqueNackRequestsInPercent());
+ }
+ }
+
+ if (num_certain_states_ >= kBadCallMinRequiredSamples) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.Any",
+ 100 * num_bad_states_ / num_certain_states_);
+ }
+ absl::optional<double> fps_fraction =
+ fps_threshold_.FractionHigh(kBadCallMinRequiredSamples);
+ if (fps_fraction) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.FrameRate",
+ static_cast<int>(100 * (1 - *fps_fraction)));
+ }
+ absl::optional<double> variance_fraction =
+ variance_threshold_.FractionHigh(kBadCallMinRequiredSamples);
+ if (variance_fraction) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.FrameRateVariance",
+ static_cast<int>(100 * *variance_fraction));
+ }
+ absl::optional<double> qp_fraction =
+ qp_threshold_.FractionHigh(kBadCallMinRequiredSamples);
+ if (qp_fraction) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.Qp",
+ static_cast<int>(100 * *qp_fraction));
+ }
+
+ RTC_LOG(LS_INFO) << log_stream.str();
+ video_quality_observer_->UpdateHistograms(
+ videocontenttypehelpers::IsScreenshare(last_content_type_));
+}
+
+void ReceiveStatisticsProxy::QualitySample(Timestamp now) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ if (last_sample_time_ + kMinSampleLengthMs > now.ms())
+ return;
+
+ double fps =
+ render_fps_tracker_.ComputeRateForInterval(now.ms() - last_sample_time_);
+ absl::optional<int> qp = qp_sample_.Avg(1);
+
+ bool prev_fps_bad = !fps_threshold_.IsHigh().value_or(true);
+ bool prev_qp_bad = qp_threshold_.IsHigh().value_or(false);
+ bool prev_variance_bad = variance_threshold_.IsHigh().value_or(false);
+ bool prev_any_bad = prev_fps_bad || prev_qp_bad || prev_variance_bad;
+
+ fps_threshold_.AddMeasurement(static_cast<int>(fps));
+ if (qp)
+ qp_threshold_.AddMeasurement(*qp);
+ absl::optional<double> fps_variance_opt = fps_threshold_.CalculateVariance();
+ double fps_variance = fps_variance_opt.value_or(0);
+ if (fps_variance_opt) {
+ variance_threshold_.AddMeasurement(static_cast<int>(fps_variance));
+ }
+
+ bool fps_bad = !fps_threshold_.IsHigh().value_or(true);
+ bool qp_bad = qp_threshold_.IsHigh().value_or(false);
+ bool variance_bad = variance_threshold_.IsHigh().value_or(false);
+ bool any_bad = fps_bad || qp_bad || variance_bad;
+
+ if (!prev_any_bad && any_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (any) start: " << now.ms();
+ } else if (prev_any_bad && !any_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (any) end: " << now.ms();
+ }
+
+ if (!prev_fps_bad && fps_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (fps) start: " << now.ms();
+ } else if (prev_fps_bad && !fps_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (fps) end: " << now.ms();
+ }
+
+ if (!prev_qp_bad && qp_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (qp) start: " << now.ms();
+ } else if (prev_qp_bad && !qp_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (qp) end: " << now.ms();
+ }
+
+ if (!prev_variance_bad && variance_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (variance) start: " << now.ms();
+ } else if (prev_variance_bad && !variance_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (variance) end: " << now.ms();
+ }
+
+ RTC_LOG(LS_VERBOSE) << "SAMPLE: sample_length: "
+ << (now.ms() - last_sample_time_) << " fps: " << fps
+ << " fps_bad: " << fps_bad << " qp: " << qp.value_or(-1)
+ << " qp_bad: " << qp_bad
+ << " variance_bad: " << variance_bad
+ << " fps_variance: " << fps_variance;
+
+ last_sample_time_ = now.ms();
+ qp_sample_.Reset();
+
+ if (fps_threshold_.IsHigh() || variance_threshold_.IsHigh() ||
+ qp_threshold_.IsHigh()) {
+ if (any_bad)
+ ++num_bad_states_;
+ ++num_certain_states_;
+ }
+}
+
+void ReceiveStatisticsProxy::UpdateFramerate(int64_t now_ms) const {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ int64_t old_frames_ms = now_ms - kRateStatisticsWindowSizeMs;
+ while (!frame_window_.empty() &&
+ frame_window_.begin()->first < old_frames_ms) {
+ frame_window_.erase(frame_window_.begin());
+ }
+
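+  // Frames within the window, rounded to the nearest whole frames-per-second.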
+ size_t framerate =
+ (frame_window_.size() * 1000 + 500) / kRateStatisticsWindowSizeMs;
+
+ stats_.network_frame_rate = static_cast<int>(framerate);
+}
+
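+// Extrapolates the last known playout NTP timestamp forward by the wall-clock
+// time that has elapsed since it was estimated.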
+absl::optional<int64_t>
+ReceiveStatisticsProxy::GetCurrentEstimatedPlayoutNtpTimestampMs(
+ int64_t now_ms) const {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ if (!last_estimated_playout_ntp_timestamp_ms_ ||
+ !last_estimated_playout_time_ms_) {
+ return absl::nullopt;
+ }
+ int64_t elapsed_ms = now_ms - *last_estimated_playout_time_ms_;
+ return *last_estimated_playout_ntp_timestamp_ms_ + elapsed_ms;
+}
+
+VideoReceiveStreamInterface::Stats ReceiveStatisticsProxy::GetStats() const {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+  // Like VideoReceiveStreamInterface::GetStats, this is called on the worker
+  // thread from StatsCollector::ExtractMediaInfo via
+  // worker_thread()->BlockingCall(), and from WebRtcVideoChannel::GetStats()
+  // and GetVideoReceiverInfo.
+
+  // Get current frame rates here, as updating them only on new frames would
+  // prevent us from ever correctly reporting a frame rate of 0.
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ UpdateFramerate(now_ms);
+
+ stats_.render_frame_rate = renders_fps_estimator_.Rate(now_ms).value_or(0);
+ stats_.decode_frame_rate = decode_fps_estimator_.Rate(now_ms).value_or(0);
+
+ if (last_decoded_frame_time_ms_) {
+    // Avoid using a timestamp newer than what may still be pending for
+    // decoded frames. If we used now_ms, we might roll the max window past
+    // the timestamp of a decoded frame whose data we have not yet captured
+    // (i.e. a pending call to OnDecodedFrame).
+ stats_.interframe_delay_max_ms =
+ interframe_delay_max_moving_.Max(*last_decoded_frame_time_ms_)
+ .value_or(-1);
+ } else {
+ // We're paused. Avoid changing the state of `interframe_delay_max_moving_`.
+ stats_.interframe_delay_max_ms = -1;
+ }
+
+ stats_.freeze_count = video_quality_observer_->NumFreezes();
+ stats_.pause_count = video_quality_observer_->NumPauses();
+ stats_.total_freezes_duration_ms =
+ video_quality_observer_->TotalFreezesDurationMs();
+ stats_.total_pauses_duration_ms =
+ video_quality_observer_->TotalPausesDurationMs();
+ stats_.total_inter_frame_delay =
+ static_cast<double>(video_quality_observer_->TotalFramesDurationMs()) /
+ rtc::kNumMillisecsPerSec;
+ stats_.total_squared_inter_frame_delay =
+ video_quality_observer_->SumSquaredFrameDurationsSec();
+
+ stats_.content_type = last_content_type_;
+ stats_.timing_frame_info = timing_frame_info_counter_.Max(now_ms);
+ stats_.jitter_buffer_delay_seconds =
+ static_cast<double>(current_delay_counter_.Sum(1).value_or(0)) /
+ rtc::kNumMillisecsPerSec;
+ stats_.jitter_buffer_emitted_count = current_delay_counter_.NumSamples();
+ stats_.estimated_playout_ntp_timestamp_ms =
+ GetCurrentEstimatedPlayoutNtpTimestampMs(now_ms);
+ return stats_;
+}
+
+void ReceiveStatisticsProxy::OnIncomingPayloadType(int payload_type) {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ worker_thread_->PostTask(SafeTask(task_safety_.flag(), [payload_type, this] {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ stats_.current_payload_type = payload_type;
+ }));
+}
+
+void ReceiveStatisticsProxy::OnDecoderInfo(
+ const VideoDecoder::DecoderInfo& decoder_info) {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ worker_thread_->PostTask(SafeTask(
+ task_safety_.flag(),
+ [this, name = decoder_info.implementation_name,
+ is_hardware_accelerated = decoder_info.is_hardware_accelerated]() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ stats_.decoder_implementation_name = name;
+ stats_.power_efficient_decoder = is_hardware_accelerated;
+ }));
+}
+
+void ReceiveStatisticsProxy::OnFrameBufferTimingsUpdated(
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ stats_.max_decode_ms = max_decode_ms;
+ stats_.current_delay_ms = current_delay_ms;
+ stats_.target_delay_ms = target_delay_ms;
+ stats_.jitter_buffer_ms = jitter_buffer_ms;
+ stats_.min_playout_delay_ms = min_playout_delay_ms;
+ stats_.render_delay_ms = render_delay_ms;
+ jitter_buffer_delay_counter_.Add(jitter_buffer_ms);
+ target_delay_counter_.Add(target_delay_ms);
+ current_delay_counter_.Add(current_delay_ms);
+ // Network delay (rtt/2) + target_delay_ms (jitter delay + decode time +
+ // render delay).
+ delay_counter_.Add(target_delay_ms + avg_rtt_ms_ / 2);
+}
+
+void ReceiveStatisticsProxy::OnUniqueFramesCounted(int num_unique_frames) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ num_unique_frames_.emplace(num_unique_frames);
+}
+
+void ReceiveStatisticsProxy::OnTimingFrameInfoUpdated(
+ const TimingFrameInfo& info) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ if (info.flags != VideoSendTiming::kInvalid) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ timing_frame_info_counter_.Add(info, now_ms);
+ }
+
+ // Measure initial decoding latency between the first frame arriving and
+ // the first frame being decoded.
+ if (!first_frame_received_time_ms_.has_value()) {
+ first_frame_received_time_ms_ = info.receive_finish_ms;
+ }
+ if (stats_.first_frame_received_to_decoded_ms == -1 &&
+ first_decoded_frame_time_ms_) {
+ stats_.first_frame_received_to_decoded_ms =
+ *first_decoded_frame_time_ms_ - *first_frame_received_time_ms_;
+ }
+}
+
+void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) {
+ if (ssrc != remote_ssrc_)
+ return;
+
+ if (!IsCurrentTaskQueueOrThread(worker_thread_)) {
+ // RtpRtcpInterface::Configuration has a single
+ // RtcpPacketTypeCounterObserver and that same configuration may be used for
+ // both receiver and sender (see ModuleRtpRtcpImpl::ModuleRtpRtcpImpl). The
+ // RTCPSender implementation currently makes calls to this function on a
+ // process thread whereas the RTCPReceiver implementation calls back on the
+ // [main] worker thread.
+ // So until the sender implementation has been updated, we work around this
+    // here by posting the update to the expected thread. We make a by-value
+    // copy of the `task_safety_` flag to handle the case where the queued
+    // task runs after the `ReceiveStatisticsProxy` has been deleted; in that
+    // case the packet_counter update won't be recorded.
+ worker_thread_->PostTask(
+ SafeTask(task_safety_.flag(), [ssrc, packet_counter, this]() {
+ RtcpPacketTypesCounterUpdated(ssrc, packet_counter);
+ }));
+ return;
+ }
+
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ stats_.rtcp_packet_type_counts = packet_counter;
+}
+
+void ReceiveStatisticsProxy::OnCname(uint32_t ssrc, absl::string_view cname) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we
+ // receive stats from one of them.
+ if (remote_ssrc_ != ssrc)
+ return;
+
+ stats_.c_name = std::string(cname);
+}
+
+void ReceiveStatisticsProxy::OnDecodedFrame(const VideoFrame& frame,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) {
+ TimeDelta processing_delay = TimeDelta::Zero();
+ webrtc::Timestamp current_time = clock_->CurrentTime();
+ // TODO(bugs.webrtc.org/13984): some tests do not fill packet_infos().
+ TimeDelta assembly_time = TimeDelta::Zero();
+ if (frame.packet_infos().size() > 0) {
+ const auto [first_packet, last_packet] = std::minmax_element(
+ frame.packet_infos().cbegin(), frame.packet_infos().cend(),
+ [](const webrtc::RtpPacketInfo& a, const webrtc::RtpPacketInfo& b) {
+ return a.receive_time() < b.receive_time();
+ });
+ if (first_packet->receive_time().IsFinite()) {
+ processing_delay = current_time - first_packet->receive_time();
+ // Extract frame assembly time (i.e. time between earliest and latest
+ // packet arrival). Note: for single-packet frames this will be 0.
+ assembly_time =
+ last_packet->receive_time() - first_packet->receive_time();
+ }
+ }
+ // See VCMDecodedFrameCallback::Decoded for more info on what thread/queue we
+ // may be on. E.g. on iOS this gets called on
+ // "com.apple.coremedia.decompressionsession.clientcallback"
+ VideoFrameMetaData meta(frame, current_time);
+ worker_thread_->PostTask(
+ SafeTask(task_safety_.flag(), [meta, qp, decode_time, processing_delay,
+ assembly_time, content_type, this]() {
+ OnDecodedFrame(meta, qp, decode_time, processing_delay, assembly_time,
+ content_type);
+ }));
+}
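
Editorial sketch: the minmax_element step above deliberately orders packets by receive time, not sequence number. A self-contained illustration with plain millisecond timestamps standing in for RtpPacketInfo::receive_time():

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // Packets of one frame, arriving out of order.
      std::vector<int64_t> receive_time_ms = {1003, 1000, 1007};
      const auto [first, last] =
          std::minmax_element(receive_time_ms.begin(), receive_time_ms.end());
      int64_t now_ms = 1020;
      int64_t processing_delay_ms = now_ms - *first;  // 20: since earliest packet
      int64_t assembly_time_ms = *last - *first;      // 7: earliest to latest
      std::cout << processing_delay_ms << " " << assembly_time_ms << "\n";
    }
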
+
+void ReceiveStatisticsProxy::OnDecodedFrame(
+ const VideoFrameMetaData& frame_meta,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ TimeDelta processing_delay,
+ TimeDelta assembly_time,
+ VideoContentType content_type) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ const bool is_screenshare =
+ videocontenttypehelpers::IsScreenshare(content_type);
+ const bool was_screenshare =
+ videocontenttypehelpers::IsScreenshare(last_content_type_);
+
+ if (is_screenshare != was_screenshare) {
+ // Reset the quality observer if content type is switched. But first report
+ // stats for the previous part of the call.
+ video_quality_observer_->UpdateHistograms(was_screenshare);
+ video_quality_observer_.reset(new VideoQualityObserver());
+ }
+
+ video_quality_observer_->OnDecodedFrame(frame_meta.rtp_timestamp, qp,
+ last_codec_type_);
+
+ ContentSpecificStats* content_specific_stats =
+ &content_specific_stats_[content_type];
+
+ ++stats_.frames_decoded;
+ if (qp) {
+ if (!stats_.qp_sum) {
+ if (stats_.frames_decoded != 1) {
+ RTC_LOG(LS_WARNING)
+ << "Frames decoded was not 1 when first qp value was received.";
+ }
+ stats_.qp_sum = 0;
+ }
+ *stats_.qp_sum += *qp;
+ content_specific_stats->qp_counter.Add(*qp);
+ } else if (stats_.qp_sum) {
+ RTC_LOG(LS_WARNING)
+ << "QP sum was already set and no QP was given for a frame.";
+ stats_.qp_sum.reset();
+ }
+ decode_time_counter_.Add(decode_time.ms());
+ stats_.decode_ms = decode_time.ms();
+ stats_.total_decode_time += decode_time;
+ stats_.total_processing_delay += processing_delay;
+ stats_.total_assembly_time += assembly_time;
+ if (!assembly_time.IsZero()) {
+ ++stats_.frames_assembled_from_multiple_packets;
+ }
+
+ last_content_type_ = content_type;
+ decode_fps_estimator_.Update(1, frame_meta.decode_timestamp.ms());
+
+ if (last_decoded_frame_time_ms_) {
+ int64_t interframe_delay_ms =
+ frame_meta.decode_timestamp.ms() - *last_decoded_frame_time_ms_;
+ RTC_DCHECK_GE(interframe_delay_ms, 0);
+ interframe_delay_max_moving_.Add(interframe_delay_ms,
+ frame_meta.decode_timestamp.ms());
+ content_specific_stats->interframe_delay_counter.Add(interframe_delay_ms);
+ content_specific_stats->interframe_delay_percentiles.Add(
+ interframe_delay_ms);
+ content_specific_stats->flow_duration_ms += interframe_delay_ms;
+ }
+ if (stats_.frames_decoded == 1) {
+ first_decoded_frame_time_ms_.emplace(frame_meta.decode_timestamp.ms());
+ }
+ last_decoded_frame_time_ms_.emplace(frame_meta.decode_timestamp.ms());
+}
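
Editorial sketch: the QP handling above keeps only a running sum, and consumers derive the average themselves. A small illustration of that contract, with made-up QP values:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    int main() {
      std::optional<uint64_t> qp_sum;  // unset until the first QP arrives
      uint32_t frames_decoded = 0;
      for (int qp : {30, 34, 26}) {
        ++frames_decoded;
        if (!qp_sum)
          qp_sum = 0;
        *qp_sum += qp;
      }
      // The average QP is derived on the consumer side, not stored.
      std::cout << "avg QP: " << (*qp_sum / frames_decoded) << "\n";  // 30
    }
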
+
+void ReceiveStatisticsProxy::OnRenderedFrame(
+ const VideoFrameMetaData& frame_meta) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ // Called from VideoReceiveStream2::OnFrame.
+
+ RTC_DCHECK_GT(frame_meta.width, 0);
+ RTC_DCHECK_GT(frame_meta.height, 0);
+
+ video_quality_observer_->OnRenderedFrame(frame_meta);
+
+ ContentSpecificStats* content_specific_stats =
+ &content_specific_stats_[last_content_type_];
+ renders_fps_estimator_.Update(1, frame_meta.decode_timestamp.ms());
+
+ ++stats_.frames_rendered;
+ stats_.width = frame_meta.width;
+ stats_.height = frame_meta.height;
+
+ render_fps_tracker_.AddSamples(1);
+ render_pixel_tracker_.AddSamples(sqrt(frame_meta.width * frame_meta.height));
+ content_specific_stats->received_width.Add(frame_meta.width);
+ content_specific_stats->received_height.Add(frame_meta.height);
+
+ // Consider taking stats_.render_delay_ms into account.
+ const int64_t time_until_rendering_ms =
+ frame_meta.render_time_ms() - frame_meta.decode_timestamp.ms();
+ if (time_until_rendering_ms < 0) {
+ sum_missed_render_deadline_ms_ += -time_until_rendering_ms;
+ ++num_delayed_frames_rendered_;
+ }
+
+ if (frame_meta.ntp_time_ms > 0) {
+ int64_t delay_ms =
+ clock_->CurrentNtpInMilliseconds() - frame_meta.ntp_time_ms;
+ if (delay_ms >= 0) {
+ content_specific_stats->e2e_delay_counter.Add(delay_ms);
+ }
+ }
+
+ QualitySample(frame_meta.decode_timestamp);
+}
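
Editorial sketch: two of the per-frame checks above are easy to illustrate with hypothetical timestamps. A negative time-until-render means a missed deadline, and end-to-end delay compares the current NTP wall clock with the frame's NTP timestamp:

    #include <cstdint>
    #include <iostream>

    int main() {
      // Frame scheduled for 5000 ms but only leaving the decoder at 5012 ms.
      int64_t render_time_ms = 5000;
      int64_t decode_timestamp_ms = 5012;
      int64_t time_until_rendering_ms = render_time_ms - decode_timestamp_ms;
      if (time_until_rendering_ms < 0)
        std::cout << "missed deadline by " << -time_until_rendering_ms << " ms\n";

      // End-to-end delay: current NTP time minus the frame's NTP timestamp.
      int64_t now_ntp_ms = 3900000250;
      int64_t frame_ntp_ms = 3900000100;
      std::cout << "e2e delay: " << (now_ntp_ms - frame_ntp_ms) << " ms\n";  // 150
    }
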
+
+void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t video_playout_ntp_ms,
+ int64_t sync_offset_ms,
+ double estimated_freq_khz) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ sync_offset_counter_.Add(std::abs(sync_offset_ms));
+ stats_.sync_offset_ms = sync_offset_ms;
+ last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms;
+ last_estimated_playout_time_ms_ = now_ms;
+
+ const double kMaxFreqKhz = 10000.0;
+ int offset_khz = kMaxFreqKhz;
+ // The estimate should be positive and below kMaxFreqKhz; otherwise report
+ // the max.
+ if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0)
+ offset_khz = static_cast<int>(std::fabs(estimated_freq_khz - 90.0) + 0.5);
+
+ freq_offset_counter_.Add(offset_khz);
+}
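
Editorial sketch: since video RTP clocks nominally tick at 90 kHz, the sample recorded above is the rounded absolute deviation from 90.0, with invalid estimates pinned to the sentinel maximum. The same logic as a free function:

    #include <cmath>
    #include <iostream>

    int OffsetKhz(double estimated_freq_khz) {
      const double kMaxFreqKhz = 10000.0;
      if (estimated_freq_khz <= 0.0 || estimated_freq_khz >= kMaxFreqKhz)
        return static_cast<int>(kMaxFreqKhz);  // invalid estimate: report max
      return static_cast<int>(std::fabs(estimated_freq_khz - 90.0) + 0.5);
    }

    int main() {
      std::cout << OffsetKhz(92.2) << "\n";  // 2
      std::cout << OffsetKhz(-1.0) << "\n";  // 10000
    }
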
+
+void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ TRACE_EVENT2("webrtc", "ReceiveStatisticsProxy::OnCompleteFrame",
+ "remote_ssrc", remote_ssrc_, "is_keyframe", is_keyframe);
+
+ if (is_keyframe) {
+ ++stats_.frame_counts.key_frames;
+ } else {
+ ++stats_.frame_counts.delta_frames;
+ }
+
+ // Content type extension is set only for keyframes and should be propagated
+ // for all the following delta frames. Here we may receive frames out of order
+ // and miscategorise some delta frames near the layer switch.
+ // This may slightly offset calculated bitrate and keyframes permille metrics.
+ VideoContentType propagated_content_type =
+ is_keyframe ? content_type : last_content_type_;
+
+ ContentSpecificStats* content_specific_stats =
+ &content_specific_stats_[propagated_content_type];
+
+ content_specific_stats->total_media_bytes += size_bytes;
+ if (is_keyframe) {
+ ++content_specific_stats->frame_counts.key_frames;
+ } else {
+ ++content_specific_stats->frame_counts.delta_frames;
+ }
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ frame_window_.insert(std::make_pair(now_ms, size_bytes));
+ UpdateFramerate(now_ms);
+}
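
Editorial sketch: frame_window_ maps arrival time to frame size so that UpdateFramerate (whose body is not shown in this hunk) can age out old entries and derive a windowed rate. A hypothetical illustration assuming a 1000 ms window; the function name and rounding are illustrative, not the upstream implementation:

    #include <cstdint>
    #include <iostream>
    #include <map>

    int WindowedFps(std::map<int64_t, size_t>& frame_window, int64_t now_ms) {
      const int64_t kWindowMs = 1000;
      // Drop entries that have aged out of the window.
      frame_window.erase(frame_window.begin(),
                         frame_window.lower_bound(now_ms - kWindowMs));
      // Remaining count, scaled to frames per second with rounding.
      return static_cast<int>((frame_window.size() * 1000 + 500) / kWindowMs);
    }

    int main() {
      std::map<int64_t, size_t> frame_window;  // arrival time (ms) -> bytes
      for (int64_t t = 0; t < 2000; t += 40)   // one frame every 40 ms
        frame_window.emplace(t, 1200);
      std::cout << WindowedFps(frame_window, 2000) << " fps\n";  // 25 fps
    }
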
+
+void ReceiveStatisticsProxy::OnDroppedFrames(uint32_t frames_dropped) {
+ // Can be called on either the decode queue or the worker thread
+ // See FrameBuffer2 for more details.
+ TRACE_EVENT2("webrtc", "ReceiveStatisticsProxy::OnDroppedFrames",
+ "remote_ssrc", remote_ssrc_, "frames_dropped", frames_dropped);
+ worker_thread_->PostTask(
+ SafeTask(task_safety_.flag(), [frames_dropped, this]() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ stats_.frames_dropped += frames_dropped;
+ }));
+}
+
+void ReceiveStatisticsProxy::OnDiscardedPackets(uint32_t packets_discarded) {
+ // Can be called on either the decode queue or the worker thread
+ // See FrameBuffer2 for more details.
+ TRACE_EVENT2("webrtc", "ReceiveStatisticsProxy::OnDiscardedPackets",
+ "remote_ssrc", remote_ssrc_, "packets_discarded",
+ packets_discarded);
+ worker_thread_->PostTask(
+ SafeTask(task_safety_.flag(), [packets_discarded, this]() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ stats_.packets_discarded += packets_discarded;
+ }));
+}
+
+void ReceiveStatisticsProxy::OnPreDecode(VideoCodecType codec_type, int qp) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ last_codec_type_ = codec_type;
+ if (last_codec_type_ == kVideoCodecVP8 && qp != -1) {
+ qp_counters_.vp8.Add(qp);
+ qp_sample_.Add(qp);
+ }
+}
+
+void ReceiveStatisticsProxy::OnStreamInactive() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+
+ // TODO(sprang): Figure out any other state that should be reset.
+
+ // Don't report inter-frame delay if stream was paused.
+ last_decoded_frame_time_ms_.reset();
+
+ video_quality_observer_->OnStreamInactive();
+}
+
+void ReceiveStatisticsProxy::OnRttUpdate(int64_t avg_rtt_ms) {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ TRACE_EVENT2("webrtc", "ReceiveStatisticsProxy::OnRttUpdate",
+ "remote_ssrc", remote_ssrc_, "avg_rtt_ms", avg_rtt_ms);
+ avg_rtt_ms_ = avg_rtt_ms;
+}
+
+void ReceiveStatisticsProxy::DecoderThreadStarting() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+}
+
+void ReceiveStatisticsProxy::DecoderThreadStopped() {
+ RTC_DCHECK_RUN_ON(&main_thread_);
+ decode_queue_.Detach();
+}
+
+ReceiveStatisticsProxy::ContentSpecificStats::ContentSpecificStats()
+ : interframe_delay_percentiles(kMaxCommonInterframeDelayMs) {}
+
+ReceiveStatisticsProxy::ContentSpecificStats::~ContentSpecificStats() = default;
+
+void ReceiveStatisticsProxy::ContentSpecificStats::Add(
+ const ContentSpecificStats& other) {
+ e2e_delay_counter.Add(other.e2e_delay_counter);
+ interframe_delay_counter.Add(other.interframe_delay_counter);
+ flow_duration_ms += other.flow_duration_ms;
+ total_media_bytes += other.total_media_bytes;
+ received_height.Add(other.received_height);
+ received_width.Add(other.received_width);
+ qp_counter.Add(other.qp_counter);
+ frame_counts.key_frames += other.frame_counts.key_frames;
+ frame_counts.delta_frames += other.frame_counts.delta_frames;
+ interframe_delay_percentiles.Add(other.interframe_delay_percentiles);
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/receive_statistics_proxy2.h b/third_party/libwebrtc/video/receive_statistics_proxy2.h
new file mode 100644
index 0000000000..20139b45e5
--- /dev/null
+++ b/third_party/libwebrtc/video/receive_statistics_proxy2.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RECEIVE_STATISTICS_PROXY2_H_
+#define VIDEO_RECEIVE_STATISTICS_PROXY2_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/timestamp.h"
+#include "api/video_codecs/video_decoder.h"
+#include "call/video_receive_stream.h"
+#include "modules/include/module_common_types.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/numerics/histogram_percentile_counter.h"
+#include "rtc_base/numerics/moving_max_counter.h"
+#include "rtc_base/numerics/sample_counter.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/rate_tracker.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/quality_threshold.h"
+#include "video/stats_counter.h"
+#include "video/video_quality_observer2.h"
+
+namespace webrtc {
+
+class Clock;
+struct CodecSpecificInfo;
+
+namespace internal {
+// Declared in video_receive_stream2.h.
+struct VideoFrameMetaData;
+
+class ReceiveStatisticsProxy : public VCMReceiveStatisticsCallback,
+ public RtcpCnameCallback,
+ public RtcpPacketTypeCounterObserver {
+ public:
+ ReceiveStatisticsProxy(uint32_t remote_ssrc,
+ Clock* clock,
+ TaskQueueBase* worker_thread);
+ ~ReceiveStatisticsProxy() override;
+
+ VideoReceiveStreamInterface::Stats GetStats() const;
+
+ void OnDecodedFrame(const VideoFrame& frame,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type);
+
+ // Called asynchronously on the worker thread as a result of a call to the
+ // above OnDecodedFrame method, which is called back on the thread where
+ // the actual decoding happens.
+ void OnDecodedFrame(const VideoFrameMetaData& frame_meta,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ TimeDelta processing_delay,
+ TimeDelta assembly_time,
+ VideoContentType content_type);
+
+ void OnSyncOffsetUpdated(int64_t video_playout_ntp_ms,
+ int64_t sync_offset_ms,
+ double estimated_freq_khz);
+ void OnRenderedFrame(const VideoFrameMetaData& frame_meta);
+ void OnIncomingPayloadType(int payload_type);
+ void OnDecoderInfo(const VideoDecoder::DecoderInfo& decoder_info);
+
+ void OnPreDecode(VideoCodecType codec_type, int qp);
+
+ void OnUniqueFramesCounted(int num_unique_frames);
+
+ // Indicates video stream has been paused (no incoming packets).
+ void OnStreamInactive();
+
+ // Overrides VCMReceiveStatisticsCallback.
+ void OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) override;
+ void OnDroppedFrames(uint32_t frames_dropped) override;
+ void OnDiscardedPackets(uint32_t packets_discarded) override;
+ void OnFrameBufferTimingsUpdated(int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) override;
+
+ void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) override;
+
+ // Overrides RtcpCnameCallback.
+ void OnCname(uint32_t ssrc, absl::string_view cname) override;
+
+ // Overrides RtcpPacketTypeCounterObserver.
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override;
+
+ void OnRttUpdate(int64_t avg_rtt_ms);
+
+ // Notification methods that are used to check our internal state and validate
+ // threading assumptions. These are called by VideoReceiveStreamInterface.
+ void DecoderThreadStarting();
+ void DecoderThreadStopped();
+
+ // Produce histograms. Must be called after DecoderThreadStopped(), typically
+ // at the end of the call.
+ void UpdateHistograms(absl::optional<int> fraction_lost,
+ const StreamDataCounters& rtp_stats,
+ const StreamDataCounters* rtx_stats);
+
+ private:
+ struct QpCounters {
+ rtc::SampleCounter vp8;
+ };
+
+ struct ContentSpecificStats {
+ ContentSpecificStats();
+ ~ContentSpecificStats();
+
+ void Add(const ContentSpecificStats& other);
+
+ rtc::SampleCounter e2e_delay_counter;
+ rtc::SampleCounter interframe_delay_counter;
+ int64_t flow_duration_ms = 0;
+ int64_t total_media_bytes = 0;
+ rtc::SampleCounter received_width;
+ rtc::SampleCounter received_height;
+ rtc::SampleCounter qp_counter;
+ FrameCounts frame_counts;
+ rtc::HistogramPercentileCounter interframe_delay_percentiles;
+ };
+
+ void QualitySample(Timestamp now);
+
+ // Removes info about old frames and then updates the framerate.
+ void UpdateFramerate(int64_t now_ms) const;
+
+ absl::optional<int64_t> GetCurrentEstimatedPlayoutNtpTimestampMs(
+ int64_t now_ms) const;
+
+ Clock* const clock_;
+ const int64_t start_ms_;
+
+ int64_t last_sample_time_ RTC_GUARDED_BY(main_thread_);
+
+ QualityThreshold fps_threshold_ RTC_GUARDED_BY(main_thread_);
+ QualityThreshold qp_threshold_ RTC_GUARDED_BY(main_thread_);
+ QualityThreshold variance_threshold_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter qp_sample_ RTC_GUARDED_BY(main_thread_);
+ int num_bad_states_ RTC_GUARDED_BY(main_thread_);
+ int num_certain_states_ RTC_GUARDED_BY(main_thread_);
+ // Note: The `stats_.rtp_stats` member is not used or populated by this class.
+ mutable VideoReceiveStreamInterface::Stats stats_
+ RTC_GUARDED_BY(main_thread_);
+ // Same as stats_.ssrc, but const (no lock required).
+ const uint32_t remote_ssrc_;
+ RateStatistics decode_fps_estimator_ RTC_GUARDED_BY(main_thread_);
+ RateStatistics renders_fps_estimator_ RTC_GUARDED_BY(main_thread_);
+ rtc::RateTracker render_fps_tracker_ RTC_GUARDED_BY(main_thread_);
+ rtc::RateTracker render_pixel_tracker_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter sync_offset_counter_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter decode_time_counter_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter jitter_buffer_delay_counter_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter target_delay_counter_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter current_delay_counter_ RTC_GUARDED_BY(main_thread_);
+ rtc::SampleCounter delay_counter_ RTC_GUARDED_BY(main_thread_);
+ std::unique_ptr<VideoQualityObserver> video_quality_observer_
+ RTC_GUARDED_BY(main_thread_);
+ mutable rtc::MovingMaxCounter<int> interframe_delay_max_moving_
+ RTC_GUARDED_BY(main_thread_);
+ std::map<VideoContentType, ContentSpecificStats> content_specific_stats_
+ RTC_GUARDED_BY(main_thread_);
+ MaxCounter freq_offset_counter_ RTC_GUARDED_BY(main_thread_);
+ QpCounters qp_counters_ RTC_GUARDED_BY(main_thread_);
+ int64_t avg_rtt_ms_ RTC_GUARDED_BY(main_thread_) = 0;
+ mutable std::map<int64_t, size_t> frame_window_ RTC_GUARDED_BY(main_thread_);
+ VideoContentType last_content_type_ RTC_GUARDED_BY(&main_thread_);
+ VideoCodecType last_codec_type_ RTC_GUARDED_BY(main_thread_);
+ absl::optional<int64_t> first_frame_received_time_ms_
+ RTC_GUARDED_BY(main_thread_);
+ absl::optional<int64_t> first_decoded_frame_time_ms_
+ RTC_GUARDED_BY(main_thread_);
+ absl::optional<int64_t> last_decoded_frame_time_ms_
+ RTC_GUARDED_BY(main_thread_);
+ size_t num_delayed_frames_rendered_ RTC_GUARDED_BY(main_thread_);
+ int64_t sum_missed_render_deadline_ms_ RTC_GUARDED_BY(main_thread_);
+ // Mutable because calling Max() on MovingMaxCounter is not const. Yet it is
+ // called from const GetStats().
+ mutable rtc::MovingMaxCounter<TimingFrameInfo> timing_frame_info_counter_
+ RTC_GUARDED_BY(main_thread_);
+ absl::optional<int> num_unique_frames_ RTC_GUARDED_BY(main_thread_);
+ absl::optional<int64_t> last_estimated_playout_ntp_timestamp_ms_
+ RTC_GUARDED_BY(main_thread_);
+ absl::optional<int64_t> last_estimated_playout_time_ms_
+ RTC_GUARDED_BY(main_thread_);
+
+ // The thread on which this instance is constructed and on which some of its
+ // main methods, such as GetStats(), are invoked.
+ TaskQueueBase* const worker_thread_;
+
+ ScopedTaskSafety task_safety_;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker decode_queue_;
+ SequenceChecker main_thread_;
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker incoming_render_queue_;
+};
+
+} // namespace internal
+} // namespace webrtc
+#endif // VIDEO_RECEIVE_STATISTICS_PROXY2_H_
diff --git a/third_party/libwebrtc/video/receive_statistics_proxy2_unittest.cc b/third_party/libwebrtc/video/receive_statistics_proxy2_unittest.cc
new file mode 100644
index 0000000000..0c628f7b83
--- /dev/null
+++ b/third_party/libwebrtc/video/receive_statistics_proxy2_unittest.cc
@@ -0,0 +1,1818 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/receive_statistics_proxy2.h"
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "rtc_base/thread.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "video/video_receive_stream2.h"
+
+namespace webrtc {
+namespace internal {
+namespace {
+const TimeDelta kFreqOffsetProcessInterval = TimeDelta::Seconds(40);
+const uint32_t kRemoteSsrc = 456;
+const int kMinRequiredSamples = 200;
+const int kWidth = 1280;
+const int kHeight = 720;
+} // namespace
+
+// TODO(sakal): ReceiveStatisticsProxy is lacking unittesting.
+class ReceiveStatisticsProxy2Test : public ::testing::Test {
+ public:
+ ReceiveStatisticsProxy2Test() : time_controller_(Timestamp::Millis(1234)) {
+ metrics::Reset();
+ statistics_proxy_ = std::make_unique<ReceiveStatisticsProxy>(
+ kRemoteSsrc, time_controller_.GetClock(),
+ time_controller_.GetMainThread());
+ }
+
+ ~ReceiveStatisticsProxy2Test() override { statistics_proxy_.reset(); }
+
+ protected:
+ // Convenience method to avoid too many explicit flushes.
+ VideoReceiveStreamInterface::Stats FlushAndGetStats() {
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ return statistics_proxy_->GetStats();
+ }
+
+ void FlushAndUpdateHistograms(absl::optional<int> fraction_lost,
+ const StreamDataCounters& rtp_stats,
+ const StreamDataCounters* rtx_stats) {
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ statistics_proxy_->UpdateHistograms(fraction_lost, rtp_stats, rtx_stats);
+ }
+
+ VideoFrame CreateFrame(int width, int height) {
+ return CreateVideoFrame(width, height, 0);
+ }
+
+ VideoFrame CreateFrameWithRenderTime(Timestamp render_time) {
+ return CreateFrameWithRenderTimeMs(render_time.ms());
+ }
+
+ VideoFrame CreateFrameWithRenderTimeMs(int64_t render_time_ms) {
+ return CreateVideoFrame(kWidth, kHeight, render_time_ms);
+ }
+
+ VideoFrame CreateVideoFrame(int width, int height, int64_t render_time_ms) {
+ VideoFrame frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_timestamp_rtp(0)
+ .set_timestamp_ms(render_time_ms)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ frame.set_ntp_time_ms(
+ time_controller_.GetClock()->CurrentNtpInMilliseconds());
+ return frame;
+ }
+
+ // Return the current fake time as a Timestamp.
+ Timestamp Now() { return time_controller_.GetClock()->CurrentTime(); }
+
+ // Creates a VideoFrameMetaData instance with a timestamp.
+ VideoFrameMetaData MetaData(const VideoFrame& frame, Timestamp ts) {
+ return VideoFrameMetaData(frame, ts);
+ }
+
+ // Creates a VideoFrameMetaData instance with the current fake time.
+ VideoFrameMetaData MetaData(const VideoFrame& frame) {
+ return VideoFrameMetaData(frame, Now());
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ GlobalSimulatedTimeController time_controller_;
+ std::unique_ptr<ReceiveStatisticsProxy> statistics_proxy_;
+};
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameIncreasesFramesDecoded) {
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_decoded);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(i, FlushAndGetStats().frames_decoded);
+ }
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, DecodedFpsIsReported) {
+ const Frequency kFps = Frequency::Hertz(20);
+ const int kRequiredSamples =
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds) * kFps;
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ for (int i = 0; i < kRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ time_controller_.AdvanceTime(1 / kFps);
+ }
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.DecodedFramesPerSecond",
+ kFps.hertz()));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, DecodedFpsIsNotReportedForTooFewSamples) {
+ const Frequency kFps = Frequency::Hertz(20);
+ const int kRequiredSamples =
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds) * kFps;
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ for (int i = 0; i < kRequiredSamples - 1; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ time_controller_.AdvanceTime(1 / kFps);
+ }
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ OnDecodedFrameWithQpDoesNotResetFramesDecodedOrTotalDecodeTime) {
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_decoded);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ TimeDelta expected_total_decode_time = TimeDelta::Zero();
+ unsigned int expected_frames_decoded = 0;
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt,
+ TimeDelta::Millis(1),
+ VideoContentType::UNSPECIFIED);
+ expected_total_decode_time += TimeDelta::Millis(1);
+ ++expected_frames_decoded;
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_frames_decoded,
+ statistics_proxy_->GetStats().frames_decoded);
+ EXPECT_EQ(expected_total_decode_time,
+ statistics_proxy_->GetStats().total_decode_time);
+ }
+ statistics_proxy_->OnDecodedFrame(frame, 1u, TimeDelta::Millis(3),
+ VideoContentType::UNSPECIFIED);
+ ++expected_frames_decoded;
+ expected_total_decode_time += TimeDelta::Millis(3);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_frames_decoded,
+ statistics_proxy_->GetStats().frames_decoded);
+ EXPECT_EQ(expected_total_decode_time,
+ statistics_proxy_->GetStats().total_decode_time);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameIncreasesProcessingDelay) {
+ const TimeDelta kProcessingDelay = TimeDelta::Millis(10);
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_decoded);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ TimeDelta expected_total_processing_delay = TimeDelta::Zero();
+ unsigned int expected_frames_decoded = 0;
+ // We keep the receive time fixed and advance the clock by 10 ms on each
+ // loop iteration, which increases the processing delay to 10/20/30 ms
+ // respectively.
+ RtpPacketInfos::vector_type packet_infos = {RtpPacketInfo(
+ /*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{}, /*receive_time=*/Now())};
+ frame.set_packet_infos(RtpPacketInfos(packet_infos));
+ for (int i = 1; i <= 3; ++i) {
+ time_controller_.AdvanceTime(kProcessingDelay);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt,
+ TimeDelta::Millis(1),
+ VideoContentType::UNSPECIFIED);
+ expected_total_processing_delay += i * kProcessingDelay;
+ ++expected_frames_decoded;
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_frames_decoded,
+ statistics_proxy_->GetStats().frames_decoded);
+ EXPECT_EQ(expected_total_processing_delay,
+ statistics_proxy_->GetStats().total_processing_delay);
+ }
+ time_controller_.AdvanceTime(kProcessingDelay);
+ statistics_proxy_->OnDecodedFrame(frame, 1u, TimeDelta::Millis(3),
+ VideoContentType::UNSPECIFIED);
+ ++expected_frames_decoded;
+ expected_total_processing_delay += 4 * kProcessingDelay;
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_frames_decoded,
+ statistics_proxy_->GetStats().frames_decoded);
+ EXPECT_EQ(expected_total_processing_delay,
+ statistics_proxy_->GetStats().total_processing_delay);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameIncreasesAssemblyTime) {
+ const TimeDelta kAssemblyTime = TimeDelta::Millis(7);
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_decoded);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ TimeDelta expected_total_assembly_time = TimeDelta::Zero();
+ unsigned int expected_frames_decoded = 0;
+ unsigned int expected_frames_assembled_from_multiple_packets = 0;
+
+ // A single-packet frame will increase neither the total assembly time nor
+ // the assembled-frames count.
+ RtpPacketInfos::vector_type single_packet_frame = {RtpPacketInfo(
+ /*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{}, /*receive_time=*/Now())};
+ frame.set_packet_infos(RtpPacketInfos(single_packet_frame));
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Millis(1),
+ VideoContentType::UNSPECIFIED);
+ ++expected_frames_decoded;
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_total_assembly_time,
+ statistics_proxy_->GetStats().total_assembly_time);
+ EXPECT_EQ(
+ expected_frames_assembled_from_multiple_packets,
+ statistics_proxy_->GetStats().frames_assembled_from_multiple_packets);
+
+ // In an ordered frame the first and last packet matter.
+ RtpPacketInfos::vector_type ordered_frame = {
+ RtpPacketInfo(/*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{},
+ /*receive_time=*/Now()),
+ RtpPacketInfo(/*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{},
+ /*receive_time=*/Now() + kAssemblyTime),
+ RtpPacketInfo(/*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{},
+ /*receive_time=*/Now() + 2 * kAssemblyTime),
+ };
+ frame.set_packet_infos(RtpPacketInfos(ordered_frame));
+ statistics_proxy_->OnDecodedFrame(frame, 1u, TimeDelta::Millis(3),
+ VideoContentType::UNSPECIFIED);
+ ++expected_frames_decoded;
+ ++expected_frames_assembled_from_multiple_packets;
+ expected_total_assembly_time += 2 * kAssemblyTime;
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_frames_decoded,
+ statistics_proxy_->GetStats().frames_decoded);
+ EXPECT_EQ(expected_total_assembly_time,
+ statistics_proxy_->GetStats().total_assembly_time);
+ EXPECT_EQ(
+ expected_frames_assembled_from_multiple_packets,
+ statistics_proxy_->GetStats().frames_assembled_from_multiple_packets);
+
+ // "First" and "last" are in receive time, not sequence number.
+ RtpPacketInfos::vector_type unordered_frame = {
+ RtpPacketInfo(/*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{},
+ /*receive_time=*/Now() + 2 * kAssemblyTime),
+ RtpPacketInfo(/*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{},
+ /*receive_time=*/Now()),
+ RtpPacketInfo(/*ssrc=*/{}, /*csrcs=*/{}, /*rtp_timestamp=*/{},
+ /*receive_time=*/Now() + kAssemblyTime),
+ };
+ frame.set_packet_infos(RtpPacketInfos(unordered_frame));
+ statistics_proxy_->OnDecodedFrame(frame, 1u, TimeDelta::Millis(3),
+ VideoContentType::UNSPECIFIED);
+ ++expected_frames_decoded;
+ ++expected_frames_assembled_from_multiple_packets;
+ expected_total_assembly_time += 2 * kAssemblyTime;
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(expected_frames_decoded,
+ statistics_proxy_->GetStats().frames_decoded);
+ EXPECT_EQ(expected_total_assembly_time,
+ statistics_proxy_->GetStats().total_assembly_time);
+ EXPECT_EQ(
+ expected_frames_assembled_from_multiple_packets,
+ statistics_proxy_->GetStats().frames_assembled_from_multiple_packets);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameIncreasesQpSum) {
+ EXPECT_EQ(absl::nullopt, statistics_proxy_->GetStats().qp_sum);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, 3u, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(3u, FlushAndGetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(frame, 127u, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(130u, FlushAndGetStats().qp_sum);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameIncreasesTotalDecodeTime) {
+ EXPECT_EQ(absl::nullopt, statistics_proxy_->GetStats().qp_sum);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, 3u, TimeDelta::Millis(4),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(4u, FlushAndGetStats().total_decode_time.ms());
+ statistics_proxy_->OnDecodedFrame(frame, 127u, TimeDelta::Millis(7),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(11u, FlushAndGetStats().total_decode_time.ms());
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsContentType) {
+ const std::string kRealtimeString("realtime");
+ const std::string kScreenshareString("screen");
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ EXPECT_EQ(kRealtimeString, videocontenttypehelpers::ToString(
+ statistics_proxy_->GetStats().content_type));
+ statistics_proxy_->OnDecodedFrame(frame, 3u, TimeDelta::Zero(),
+ VideoContentType::SCREENSHARE);
+ EXPECT_EQ(kScreenshareString,
+ videocontenttypehelpers::ToString(FlushAndGetStats().content_type));
+ statistics_proxy_->OnDecodedFrame(frame, 3u, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kRealtimeString,
+ videocontenttypehelpers::ToString(FlushAndGetStats().content_type));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsMaxInterframeDelay) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ const TimeDelta kInterframeDelay1 = TimeDelta::Millis(100);
+ const TimeDelta kInterframeDelay2 = TimeDelta::Millis(200);
+ const TimeDelta kInterframeDelay3 = TimeDelta::Millis(100);
+ EXPECT_EQ(-1, statistics_proxy_->GetStats().interframe_delay_max_ms);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(-1, FlushAndGetStats().interframe_delay_max_ms);
+
+ time_controller_.AdvanceTime(kInterframeDelay1);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kInterframeDelay1.ms(), FlushAndGetStats().interframe_delay_max_ms);
+
+ time_controller_.AdvanceTime(kInterframeDelay2);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kInterframeDelay2.ms(), FlushAndGetStats().interframe_delay_max_ms);
+
+ time_controller_.AdvanceTime(kInterframeDelay3);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ // kInterframeDelay3 is smaller than kInterframeDelay2.
+ EXPECT_EQ(kInterframeDelay2.ms(), FlushAndGetStats().interframe_delay_max_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportInterframeDelayInWindow) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ const TimeDelta kInterframeDelay1 = TimeDelta::Millis(900);
+ const TimeDelta kInterframeDelay2 = TimeDelta::Millis(750);
+ const TimeDelta kInterframeDelay3 = TimeDelta::Millis(700);
+ EXPECT_EQ(-1, statistics_proxy_->GetStats().interframe_delay_max_ms);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(-1, FlushAndGetStats().interframe_delay_max_ms);
+
+ time_controller_.AdvanceTime(kInterframeDelay1);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kInterframeDelay1.ms(), FlushAndGetStats().interframe_delay_max_ms);
+
+ time_controller_.AdvanceTime(kInterframeDelay2);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ // The first delay is still the maximum.
+ EXPECT_EQ(kInterframeDelay1.ms(), FlushAndGetStats().interframe_delay_max_ms);
+
+ time_controller_.AdvanceTime(kInterframeDelay3);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ // Now the first sample is out of the window, so the second is the maximum.
+ EXPECT_EQ(kInterframeDelay2.ms(), FlushAndGetStats().interframe_delay_max_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsFreezeMetrics) {
+ const TimeDelta kFreezeDuration = TimeDelta::Seconds(1);
+
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(0u, stats.freeze_count);
+ EXPECT_FALSE(stats.total_freezes_duration_ms);
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ for (size_t i = 0; i < VideoQualityObserver::kMinFrameSamplesToDetectFreeze;
+ ++i) {
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ }
+
+ // Freeze.
+ time_controller_.AdvanceTime(kFreezeDuration);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(1u, stats.freeze_count);
+ EXPECT_EQ(kFreezeDuration.ms(), stats.total_freezes_duration_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsPauseMetrics) {
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ ASSERT_EQ(0u, stats.pause_count);
+ ASSERT_EQ(0u, stats.total_pauses_duration_ms);
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ // Pause.
+ time_controller_.AdvanceTime(TimeDelta::Millis(5432));
+ statistics_proxy_->OnStreamInactive();
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(1u, stats.pause_count);
+ EXPECT_EQ(5432u, stats.total_pauses_duration_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, PauseBeforeFirstAndAfterLastFrameIgnored) {
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ ASSERT_EQ(0u, stats.pause_count);
+ ASSERT_EQ(0u, stats.total_pauses_duration_ms);
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ // Pause -> Frame -> Pause
+ time_controller_.AdvanceTime(TimeDelta::Seconds(5));
+ statistics_proxy_->OnStreamInactive();
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ time_controller_.AdvanceTime(TimeDelta::Seconds(5));
+ statistics_proxy_->OnStreamInactive();
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(0u, stats.pause_count);
+ EXPECT_EQ(0u, stats.total_pauses_duration_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsTotalInterFrameDelay) {
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ ASSERT_EQ(0.0, stats.total_inter_frame_delay);
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ // Emulate delay before first frame is rendered. This is needed to ensure
+ // that frame duration only covers time since first frame is rendered and
+ // not the total time.
+ time_controller_.AdvanceTime(TimeDelta::Millis(5432));
+ for (int i = 0; i <= 10; ++i) {
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ }
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(10 * 30 / 1000.0, stats.total_inter_frame_delay);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsTotalSquaredInterFrameDelay) {
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ ASSERT_EQ(0.0, stats.total_squared_inter_frame_delay);
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ for (int i = 0; i <= 10; ++i) {
+ time_controller_.AdvanceTime(TimeDelta::Millis(30));
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ }
+
+ stats = statistics_proxy_->GetStats();
+ const double kExpectedTotalSquaredInterFrameDelaySecs =
+ 10 * (30 / 1000.0 * 30 / 1000.0);
+ EXPECT_EQ(kExpectedTotalSquaredInterFrameDelaySecs,
+ stats.total_squared_inter_frame_delay);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameWithoutQpQpSumWontExist) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ EXPECT_EQ(absl::nullopt, statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(absl::nullopt, FlushAndGetStats().qp_sum);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnDecodedFrameWithoutQpResetsQpSum) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ EXPECT_EQ(absl::nullopt, statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(frame, 3u, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(3u, FlushAndGetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(absl::nullopt, FlushAndGetStats().qp_sum);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, OnRenderedFrameIncreasesFramesRendered) {
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_rendered);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ EXPECT_EQ(i, statistics_proxy_->GetStats().frames_rendered);
+ }
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsSsrc) {
+ EXPECT_EQ(kRemoteSsrc, statistics_proxy_->GetStats().ssrc);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsIncomingPayloadType) {
+ const int kPayloadType = 111;
+ statistics_proxy_->OnIncomingPayloadType(kPayloadType);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(kPayloadType, statistics_proxy_->GetStats().current_payload_type);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsDecoderInfo) {
+ auto init_stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(init_stats.decoder_implementation_name, "unknown");
+ EXPECT_EQ(init_stats.power_efficient_decoder, absl::nullopt);
+
+ const VideoDecoder::DecoderInfo decoder_info{
+ .implementation_name = "decoderName", .is_hardware_accelerated = true};
+ statistics_proxy_->OnDecoderInfo(decoder_info);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ auto stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(decoder_info.implementation_name,
+ stats.decoder_implementation_name);
+ EXPECT_TRUE(stats.power_efficient_decoder);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsOnCompleteFrame) {
+ const int kFrameSizeBytes = 1000;
+ statistics_proxy_->OnCompleteFrame(true, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(1, stats.network_frame_rate);
+ EXPECT_EQ(1, stats.frame_counts.key_frames);
+ EXPECT_EQ(0, stats.frame_counts.delta_frames);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsOnDroppedFrame) {
+ unsigned int dropped_frames = 0;
+ for (int i = 0; i < 10; ++i) {
+ statistics_proxy_->OnDroppedFrames(i);
+ dropped_frames += i;
+ }
+ VideoReceiveStreamInterface::Stats stats = FlushAndGetStats();
+ EXPECT_EQ(dropped_frames, stats.frames_dropped);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsDecodeTimingStats) {
+ const int kMaxDecodeMs = 2;
+ const int kCurrentDelayMs = 3;
+ const int kTargetDelayMs = 4;
+ const int kJitterBufferMs = 5;
+ const int kMinPlayoutDelayMs = 6;
+ const int kRenderDelayMs = 7;
+ const int64_t kRttMs = 8;
+ statistics_proxy_->OnRttUpdate(kRttMs);
+ statistics_proxy_->OnFrameBufferTimingsUpdated(
+ kMaxDecodeMs, kCurrentDelayMs, kTargetDelayMs, kJitterBufferMs,
+ kMinPlayoutDelayMs, kRenderDelayMs);
+ VideoReceiveStreamInterface::Stats stats = FlushAndGetStats();
+ EXPECT_EQ(kMaxDecodeMs, stats.max_decode_ms);
+ EXPECT_EQ(kCurrentDelayMs, stats.current_delay_ms);
+ EXPECT_EQ(kTargetDelayMs, stats.target_delay_ms);
+ EXPECT_EQ(kJitterBufferMs, stats.jitter_buffer_ms);
+ EXPECT_EQ(kMinPlayoutDelayMs, stats.min_playout_delay_ms);
+ EXPECT_EQ(kRenderDelayMs, stats.render_delay_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsRtcpPacketTypeCounts) {
+ const uint32_t kFirPackets = 33;
+ const uint32_t kPliPackets = 44;
+ const uint32_t kNackPackets = 55;
+ RtcpPacketTypeCounter counter;
+ counter.fir_packets = kFirPackets;
+ counter.pli_packets = kPliPackets;
+ counter.nack_packets = kNackPackets;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kFirPackets, stats.rtcp_packet_type_counts.fir_packets);
+ EXPECT_EQ(kPliPackets, stats.rtcp_packet_type_counts.pli_packets);
+ EXPECT_EQ(kNackPackets, stats.rtcp_packet_type_counts.nack_packets);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ GetStatsReportsNoRtcpPacketTypeCountsForUnknownSsrc) {
+ RtcpPacketTypeCounter counter;
+ counter.fir_packets = 33;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc + 1, counter);
+ EXPECT_EQ(0u,
+ statistics_proxy_->GetStats().rtcp_packet_type_counts.fir_packets);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsFrameCounts) {
+ const int kKeyFrames = 3;
+ const int kDeltaFrames = 22;
+ for (int i = 0; i < kKeyFrames; i++) {
+ statistics_proxy_->OnCompleteFrame(true, 0, VideoContentType::UNSPECIFIED);
+ }
+ for (int i = 0; i < kDeltaFrames; i++) {
+ statistics_proxy_->OnCompleteFrame(false, 0, VideoContentType::UNSPECIFIED);
+ }
+
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kKeyFrames, stats.frame_counts.key_frames);
+ EXPECT_EQ(kDeltaFrames, stats.frame_counts.delta_frames);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsCName) {
+ const char* kName = "cName";
+ statistics_proxy_->OnCname(kRemoteSsrc, kName);
+ EXPECT_STREQ(kName, statistics_proxy_->GetStats().c_name.c_str());
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsNoCNameForUnknownSsrc) {
+ const char* kName = "cName";
+ statistics_proxy_->OnCname(kRemoteSsrc + 1, kName);
+ EXPECT_STREQ("", statistics_proxy_->GetStats().c_name.c_str());
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReportsLongestTimingFrameInfo) {
+ const int64_t kShortEndToEndDelay = 10;
+ const int64_t kMedEndToEndDelay = 20;
+ const int64_t kLongEndToEndDelay = 100;
+ const uint32_t kExpectedRtpTimestamp = 2;
+ TimingFrameInfo info;
+ absl::optional<TimingFrameInfo> result;
+ info.rtp_timestamp = kExpectedRtpTimestamp - 1;
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kShortEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ info.rtp_timestamp =
+ kExpectedRtpTimestamp; // this frame should be reported in the end.
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kLongEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ info.rtp_timestamp = kExpectedRtpTimestamp + 1;
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kMedEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ result = FlushAndGetStats().timing_frame_info;
+ EXPECT_TRUE(result);
+ EXPECT_EQ(kExpectedRtpTimestamp, result->rtp_timestamp);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, RespectsReportingIntervalForTimingFrames) {
+ TimingFrameInfo info;
+ const int64_t kShortEndToEndDelay = 10;
+ const uint32_t kExpectedRtpTimestamp = 2;
+ const TimeDelta kShortDelay = TimeDelta::Seconds(1);
+ const TimeDelta kLongDelay = TimeDelta::Seconds(10);
+ absl::optional<TimingFrameInfo> result;
+ info.rtp_timestamp = kExpectedRtpTimestamp;
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kShortEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ time_controller_.AdvanceTime(kShortDelay);
+ result = FlushAndGetStats().timing_frame_info;
+ EXPECT_TRUE(result);
+ EXPECT_EQ(kExpectedRtpTimestamp, result->rtp_timestamp);
+ time_controller_.AdvanceTime(kLongDelay);
+ result = statistics_proxy_->GetStats().timing_frame_info;
+ EXPECT_FALSE(result);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, LifetimeHistogramIsUpdated) {
+ const TimeDelta kLifetime = TimeDelta::Seconds(3);
+ time_controller_.AdvanceTime(kLifetime);
+ // Need at least one frame to report stream lifetime.
+ statistics_proxy_->OnCompleteFrame(true, 1000, VideoContentType::UNSPECIFIED);
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
+ kLifetime.seconds()));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ LifetimeHistogramNotReportedForEmptyStreams) {
+ const TimeDelta kLifetime = TimeDelta::Seconds(3);
+ time_controller_.AdvanceTime(kLifetime);
+ // No frames received.
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, BadCallHistogramsAreUpdated) {
+ // Based on the tuning parameters this will produce 7 uncertain states,
+ // then 10 certainly bad states. There has to be 10 certain states before
+ // any histograms are recorded.
+ const int kNumBadSamples = 17;
+ // We only count one sample per second.
+ const TimeDelta kBadFrameInterval = TimeDelta::Millis(1100);
+
+ StreamDataCounters counters;
+ counters.first_packet_time_ms = Now().ms();
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kNumBadSamples; ++i) {
+ time_controller_.AdvanceTime(kBadFrameInterval);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ }
+ statistics_proxy_->UpdateHistograms(absl::nullopt, counters, nullptr);
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.Any"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.BadCall.Any", 100));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.FrameRate"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.BadCall.FrameRate", 100));
+
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.BadCall.FrameRateVariance"));
+
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.BadCall.Qp"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, PacketLossHistogramIsUpdated) {
+ statistics_proxy_->UpdateHistograms(10, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+
+ // Restart
+ SetUp();
+
+ // Min run time has passed.
+ time_controller_.AdvanceTime(
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds));
+ statistics_proxy_->UpdateHistograms(10, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceivedPacketsLostInPercent", 10));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsPlayoutTimestamp) {
+ const int64_t kVideoNtpMs = 21;
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().estimated_playout_ntp_timestamp_ms);
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs, kFreqKhz);
+ EXPECT_EQ(kVideoNtpMs, FlushAndGetStats().estimated_playout_ntp_timestamp_ms);
+ time_controller_.AdvanceTime(TimeDelta::Millis(13));
+ EXPECT_EQ(kVideoNtpMs + 13,
+ statistics_proxy_->GetStats().estimated_playout_ntp_timestamp_ms);
+ time_controller_.AdvanceTime(TimeDelta::Millis(5));
+ EXPECT_EQ(kVideoNtpMs + 13 + 5,
+ statistics_proxy_->GetStats().estimated_playout_ntp_timestamp_ms);
+}
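
Editorial sketch: this test relies on the proxy extrapolating the playout NTP timestamp between sync updates. An illustration of that extrapolation, assuming only the last update is stored; the function name is hypothetical:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    std::optional<int64_t> EstimatedPlayoutNtpMs(
        std::optional<int64_t> last_playout_ntp_ms,
        std::optional<int64_t> last_update_ms,
        int64_t now_ms) {
      if (!last_playout_ntp_ms || !last_update_ms)
        return std::nullopt;  // No sync update received yet.
      // Advance the last reported NTP time by the locally elapsed time.
      return *last_playout_ntp_ms + (now_ms - *last_update_ms);
    }

    int main() {
      // A sync update recorded NTP 21 at local time 1000; 13 ms later the
      // estimate reads 34, matching the kVideoNtpMs + 13 expectation above.
      std::cout << *EstimatedPlayoutNtpMs(21, 1000, 1013) << "\n";  // 34
    }
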
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsAvSyncOffset) {
+ const int64_t kVideoNtpMs = 21;
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ statistics_proxy_->GetStats().sync_offset_ms);
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs, kFreqKhz);
+ EXPECT_EQ(kSyncOffsetMs, FlushAndGetStats().sync_offset_ms);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, AvSyncOffsetHistogramIsUpdated) {
+ const int64_t kVideoNtpMs = 21;
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs,
+ kFreqKhz);
+ }
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.AVSyncOffsetInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AVSyncOffsetInMs", kSyncOffsetMs));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, RtpToNtpFrequencyOffsetHistogramIsUpdated) {
+ const int64_t kVideoNtpMs = 21;
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs, kFreqKhz);
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs,
+ kFreqKhz + 2.2);
+ time_controller_.AdvanceTime(kFreqOffsetProcessInterval);
+ // Process interval passed, max diff: 2.
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs,
+ kFreqKhz + 1.1);
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs,
+ kFreqKhz - 4.2);
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs,
+ kFreqKhz - 0.9);
+ time_controller_.AdvanceTime(kFreqOffsetProcessInterval);
+ // Process interval passed, max diff: 4.
+ statistics_proxy_->OnSyncOffsetUpdated(kVideoNtpMs, kSyncOffsetMs, kFreqKhz);
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ // Average reported: (2 + 4) / 2 = 3.
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RtpToNtpFreqOffsetInKhz"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.RtpToNtpFreqOffsetInKhz", 3));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, Vp8QpHistogramIsUpdated) {
+ const int kQp = 22;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnPreDecode(kVideoCodecVP8, kQp);
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.Decoded.Vp8.Qp", kQp));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ Vp8QpHistogramIsNotUpdatedForTooFewSamples) {
+ const int kQp = 22;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i)
+ statistics_proxy_->OnPreDecode(kVideoCodecVP8, kQp);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, Vp8QpHistogramIsNotUpdatedIfNoQpValue) {
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnPreDecode(kVideoCodecVP8, -1);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ KeyFrameHistogramNotUpdatedForTooFewSamples) {
+ const bool kIsKeyFrame = false;
+ const int kFrameSizeBytes = 1000;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i)
+ statistics_proxy_->OnCompleteFrame(kIsKeyFrame, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ EXPECT_EQ(0, statistics_proxy_->GetStats().frame_counts.key_frames);
+ EXPECT_EQ(kMinRequiredSamples - 1,
+ statistics_proxy_->GetStats().frame_counts.delta_frames);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ KeyFrameHistogramUpdatedForMinRequiredSamples) {
+ const bool kIsKeyFrame = false;
+ const int kFrameSizeBytes = 1000;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnCompleteFrame(kIsKeyFrame, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ EXPECT_EQ(0, statistics_proxy_->GetStats().frame_counts.key_frames);
+ EXPECT_EQ(kMinRequiredSamples,
+ statistics_proxy_->GetStats().frame_counts.delta_frames);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 0));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, KeyFrameHistogramIsUpdated) {
+ const int kFrameSizeBytes = 1000;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnCompleteFrame(true, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnCompleteFrame(false, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ EXPECT_EQ(kMinRequiredSamples,
+ statistics_proxy_->GetStats().frame_counts.key_frames);
+ EXPECT_EQ(kMinRequiredSamples,
+ statistics_proxy_->GetStats().frame_counts.delta_frames);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 500));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ TimingHistogramsNotUpdatedForTooFewSamples) {
+ const int kMaxDecodeMs = 2;
+ const int kCurrentDelayMs = 3;
+ const int kTargetDelayMs = 4;
+ const int kJitterBufferMs = 5;
+ const int kMinPlayoutDelayMs = 6;
+ const int kRenderDelayMs = 7;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i) {
+ statistics_proxy_->OnFrameBufferTimingsUpdated(
+ kMaxDecodeMs, kCurrentDelayMs, kTargetDelayMs, kJitterBufferMs,
+ kMinPlayoutDelayMs, kRenderDelayMs);
+ }
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, TimingHistogramsAreUpdated) {
+ const int kMaxDecodeMs = 2;
+ const int kCurrentDelayMs = 3;
+ const int kTargetDelayMs = 4;
+ const int kJitterBufferMs = 5;
+ const int kMinPlayoutDelayMs = 6;
+ const int kRenderDelayMs = 7;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnFrameBufferTimingsUpdated(
+ kMaxDecodeMs, kCurrentDelayMs, kTargetDelayMs, kJitterBufferMs,
+ kMinPlayoutDelayMs, kRenderDelayMs);
+ }
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.JitterBufferDelayInMs",
+ kJitterBufferMs));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.TargetDelayInMs", kTargetDelayMs));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.CurrentDelayInMs", kCurrentDelayMs));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.OnewayDelayInMs", kTargetDelayMs));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, DoesNotReportStaleFramerates) {
+ const Frequency kDefaultFps = Frequency::Hertz(30);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kDefaultFps.hertz(); ++i) {
+    // Decode and render one frame per tick at the default rate so both fps
+    // trackers accumulate samples.
+ frame.set_ntp_time_ms(
+ time_controller_.GetClock()->CurrentNtpInMilliseconds());
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ time_controller_.AdvanceTime(1 / kDefaultFps);
+ }
+
+ // Why -1? Because RateStatistics does not consider the first frame in the
+ // rate as it will appear in the previous bucket.
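+  // E.g. 30 frames in the one-second window yield a measured rate of 29 fps.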
+ EXPECT_EQ(kDefaultFps.hertz() - 1,
+ statistics_proxy_->GetStats().decode_frame_rate);
+ EXPECT_EQ(kDefaultFps.hertz() - 1,
+ statistics_proxy_->GetStats().render_frame_rate);
+
+ // FPS trackers in stats proxy have a 1000ms sliding window.
+ time_controller_.AdvanceTime(TimeDelta::Seconds(1));
+ EXPECT_EQ(0, statistics_proxy_->GetStats().decode_frame_rate);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().render_frame_rate);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, GetStatsReportsReceivedFrameStats) {
+ EXPECT_EQ(0, statistics_proxy_->GetStats().width);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().height);
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_rendered);
+
+ statistics_proxy_->OnRenderedFrame(MetaData(CreateFrame(kWidth, kHeight)));
+
+ EXPECT_EQ(kWidth, statistics_proxy_->GetStats().width);
+ EXPECT_EQ(kHeight, statistics_proxy_->GetStats().height);
+ EXPECT_EQ(1u, statistics_proxy_->GetStats().frames_rendered);
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ ReceivedFrameHistogramsAreNotUpdatedForTooFewSamples) {
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i) {
+ statistics_proxy_->OnRenderedFrame(MetaData(CreateFrame(kWidth, kHeight)));
+ }
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ReceivedFrameHistogramsAreUpdated) {
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnRenderedFrame(MetaData(CreateFrame(kWidth, kHeight)));
+ }
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceivedWidthInPixels", kWidth));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceivedHeightInPixels", kHeight));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, ZeroDelayReportedIfFrameNotDelayed) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+
+ // Frame not delayed, delayed frames to render: 0%.
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTime(Now())));
+
+ // Min run time has passed.
+  time_controller_.AdvanceTime(
+      TimeDelta::Seconds(metrics::kMinRunTimeInSeconds));
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 0));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ DelayedFrameHistogramsAreNotUpdatedIfMinRuntimeHasNotPassed) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+
+ // Frame not delayed, delayed frames to render: 0%.
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTime(Now())));
+
+ // Min run time has not passed.
+ time_controller_.AdvanceTime(
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds) - TimeDelta::Millis(1));
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ DelayedFramesHistogramsAreNotUpdatedIfNoRenderedFrames) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+
+ // Min run time has passed. No rendered frames.
+  time_controller_.AdvanceTime(
+      TimeDelta::Seconds(metrics::kMinRunTimeInSeconds));
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, DelayReportedIfFrameIsDelayed) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+
+ // Frame delayed 1 ms, delayed frames to render: 100%.
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTimeMs(Now().ms() - 1)));
+
+ // Min run time has passed.
+ time_controller_.AdvanceTime(
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds));
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 100));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
+ 1));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, AverageDelayOfDelayedFramesIsReported) {
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ VideoContentType::UNSPECIFIED);
+
+ // Two frames delayed (6 ms, 10 ms), delayed frames to render: 50%.
+ const int64_t kNowMs = Now().ms();
+
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTimeMs(kNowMs - 10)));
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTimeMs(kNowMs - 6)));
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTimeMs(kNowMs)));
+ statistics_proxy_->OnRenderedFrame(
+ MetaData(CreateFrameWithRenderTimeMs(kNowMs + 1)));
+
+ // Min run time has passed.
+ time_controller_.AdvanceTime(
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds));
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 50));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
+ 8));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test,
+ RtcpHistogramsNotUpdatedIfMinRuntimeHasNotPassed) {
+ StreamDataCounters data_counters;
+ data_counters.first_packet_time_ms =
+ time_controller_.GetClock()->TimeInMilliseconds();
+
+ time_controller_.AdvanceTime(
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds) - TimeDelta::Millis(1));
+
+ RtcpPacketTypeCounter counter;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, data_counters, nullptr);
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+}
+
+TEST_F(ReceiveStatisticsProxy2Test, RtcpHistogramsAreUpdated) {
+ StreamDataCounters data_counters;
+ data_counters.first_packet_time_ms =
+ time_controller_.GetClock()->TimeInMilliseconds();
+ time_controller_.AdvanceTime(
+ TimeDelta::Seconds(metrics::kMinRunTimeInSeconds));
+
+ const uint32_t kFirPackets = 100;
+ const uint32_t kPliPackets = 200;
+ const uint32_t kNackPackets = 300;
+
+ RtcpPacketTypeCounter counter;
+ counter.fir_packets = kFirPackets;
+ counter.pli_packets = kPliPackets;
+ counter.nack_packets = kNackPackets;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, data_counters, nullptr);
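+  // The per-minute rates below are the accumulated packet counts scaled by
+  // 60 / elapsed run time in seconds.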
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.FirPacketsSentPerMinute",
+ kFirPackets * 60 / metrics::kMinRunTimeInSeconds));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PliPacketsSentPerMinute",
+ kPliPackets * 60 / metrics::kMinRunTimeInSeconds));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.NackPacketsSentPerMinute",
+ kNackPackets * 60 / metrics::kMinRunTimeInSeconds));
+}
+
+class ReceiveStatisticsProxy2TestWithFreezeDuration
+ : public ReceiveStatisticsProxy2Test,
+ public ::testing::WithParamInterface<
+ std::tuple<uint32_t, uint32_t, uint32_t>> {
+ protected:
+  const uint32_t frame_duration_ms_ = std::get<0>(GetParam());
+  const uint32_t freeze_duration_ms_ = std::get<1>(GetParam());
+  const uint32_t expected_freeze_count_ = std::get<2>(GetParam());
+};
+
+// It is a freeze if:
+// frame_duration_ms >= max(3 * avg_frame_duration, avg_frame_duration + 150)
+// where avg_frame_duration is the average duration of the last 30 frames,
+// including the current one.
+//
+// Condition 1: 3 * avg_frame_duration > avg_frame_duration + 150
+const auto kFreezeDetectionCond1Freeze = std::make_tuple(150, 483, 1);
+const auto kFreezeDetectionCond1NotFreeze = std::make_tuple(150, 482, 0);
+// Condition 2: 3 * avg_frame_duration < avg_frame_duration + 150
+const auto kFreezeDetectionCond2Freeze = std::make_tuple(30, 185, 1);
+const auto kFreezeDetectionCond2NotFreeze = std::make_tuple(30, 184, 0);
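+// A worked example for condition 1, assuming the observer truncates the
+// average to whole milliseconds: with 29 in-window delays of 150 ms and a
+// candidate delay of 483 ms, avg = (29 * 150 + 483) / 30 = 161 ms, giving a
+// threshold of max(3 * 161, 161 + 150) = 483 ms, so 483 ms is a freeze while
+// 482 ms is not.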
+
+INSTANTIATE_TEST_SUITE_P(_,
+ ReceiveStatisticsProxy2TestWithFreezeDuration,
+ ::testing::Values(kFreezeDetectionCond1Freeze,
+ kFreezeDetectionCond1NotFreeze,
+ kFreezeDetectionCond2Freeze,
+ kFreezeDetectionCond2NotFreeze));
+
+TEST_P(ReceiveStatisticsProxy2TestWithFreezeDuration, FreezeDetection) {
+ VideoReceiveStreamInterface::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(0u, stats.freeze_count);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+  // Add a very long frame. This is needed to verify that the average frame
+  // duration, which is supposed to be calculated as the mean of the durations
+  // of the last 30 frames, is calculated correctly.
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ time_controller_.AdvanceTime(TimeDelta::Seconds(2));
+ for (size_t i = 0;
+ i <= VideoQualityObserver::kAvgInterframeDelaysWindowSizeFrames; ++i) {
+ time_controller_.AdvanceTime(TimeDelta::Millis(frame_duration_ms_));
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(freeze_duration_ms_));
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(stats.freeze_count, expected_freeze_count_);
+}
+
+class ReceiveStatisticsProxy2TestWithContent
+ : public ReceiveStatisticsProxy2Test,
+ public ::testing::WithParamInterface<webrtc::VideoContentType> {
+ protected:
+ const webrtc::VideoContentType content_type_{GetParam()};
+};
+
+INSTANTIATE_TEST_SUITE_P(ContentTypes,
+ ReceiveStatisticsProxy2TestWithContent,
+ ::testing::Values(VideoContentType::UNSPECIFIED,
+ VideoContentType::SCREENSHARE));
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, InterFrameDelaysAreReported) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+  // One extra frame with double the interval.
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ const TimeDelta kExpectedInterFrame =
+ (kInterFrameDelay * (kMinRequiredSamples - 1) + kInterFrameDelay * 2) /
+ kMinRequiredSamples;
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(
+ kExpectedInterFrame.ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ kInterFrameDelay.ms() * 2,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ } else {
+ EXPECT_METRIC_EQ(kExpectedInterFrame.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelay.ms() * 2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent,
+ InterFrameDelaysPercentilesAreReported) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ const int kLastFivePercentsSamples = kMinRequiredSamples * 5 / 100;
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i <= kMinRequiredSamples - kLastFivePercentsSamples; ++i) {
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ }
+ // Last 5% of intervals are double in size.
+ for (int i = 0; i < kLastFivePercentsSamples; ++i) {
+ time_controller_.AdvanceTime(2 * kInterFrameDelay);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ }
+  // The final sample is an outlier, 10 times the normal interval.
+ time_controller_.AdvanceTime(10 * kInterFrameDelay);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ const TimeDelta kExpectedInterFrame = kInterFrameDelay * 2;
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(
+ kExpectedInterFrame.ms(),
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.InterframeDelay95PercentileInMs"));
+ } else {
+ EXPECT_METRIC_EQ(
+ kExpectedInterFrame.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelay95PercentileInMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent,
+ MaxInterFrameDelayOnlyWithValidAverage) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+
+  // `kMinRequiredSamples` samples, and thereby intervals, are required. That
+  // means we're one frame short of having a valid data set.
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent,
+ MaxInterFrameDelayOnlyWithPause) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+
+  // At this point, we should have a valid inter-frame delay.
+  // Indicate that the stream is paused and make a large jump in time.
+ statistics_proxy_->OnStreamInactive();
+ time_controller_.AdvanceTime(TimeDelta::Seconds(5));
+ // Insert two more frames. The interval during the pause should be
+ // disregarded in the stats.
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
+ kInterFrameDelay.ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ kInterFrameDelay.ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ } else {
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelay.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelay.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, FreezesAreReported) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ const TimeDelta kFreezeDelay = TimeDelta::Millis(200);
+ const TimeDelta kCallDuration =
+ kMinRequiredSamples * kInterFrameDelay + kFreezeDelay;
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ VideoFrameMetaData meta = MetaData(frame);
+ statistics_proxy_->OnDecodedFrame(meta, absl::nullopt, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ // Add extra freeze.
+ time_controller_.AdvanceTime(kFreezeDelay);
+ VideoFrameMetaData meta = MetaData(frame);
+ statistics_proxy_->OnDecodedFrame(meta, absl::nullopt, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ const TimeDelta kExpectedTimeBetweenFreezes =
+ kInterFrameDelay * (kMinRequiredSamples - 1);
+ const int kExpectedNumberFreezesPerMinute = 60 / kCallDuration.seconds();
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(
+ (kFreezeDelay + kInterFrameDelay).ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(kExpectedTimeBetweenFreezes.ms(),
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanTimeBetweenFreezesMs"));
+ EXPECT_METRIC_EQ(
+ kExpectedNumberFreezesPerMinute,
+ metrics::MinSample("WebRTC.Video.Screenshare.NumberFreezesPerMinute"));
+ } else {
+ EXPECT_METRIC_EQ((kFreezeDelay + kInterFrameDelay).ms(),
+ metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(
+ kExpectedTimeBetweenFreezes.ms(),
+ metrics::MinSample("WebRTC.Video.MeanTimeBetweenFreezesMs"));
+ EXPECT_METRIC_EQ(kExpectedNumberFreezesPerMinute,
+ metrics::MinSample("WebRTC.Video.NumberFreezesPerMinute"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, HarmonicFrameRateIsReported) {
+ const TimeDelta kFrameDuration = TimeDelta::Millis(33);
+ const TimeDelta kFreezeDuration = TimeDelta::Millis(200);
+ const TimeDelta kPauseDuration = TimeDelta::Seconds(10);
+ const TimeDelta kCallDuration =
+ kMinRequiredSamples * kFrameDuration + kFreezeDuration + kPauseDuration;
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ time_controller_.AdvanceTime(kFrameDuration);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+ }
+
+  // Freezes and pauses should be included in the harmonic frame rate.
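+  // The expected value below is the duration-weighted harmonic mean of the
+  // per-frame rates: with frame durations d_i, that is sum(d_i) / sum(d_i^2),
+  // so a single long freeze or pause pulls the rate down sharply.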
+ // Add freeze.
+ time_controller_.AdvanceTime(kFreezeDuration);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ // Add pause.
+ time_controller_.AdvanceTime(kPauseDuration);
+ statistics_proxy_->OnStreamInactive();
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+  double sum_squared_frame_duration_secs =
+      (kMinRequiredSamples - 1) *
+      (kFrameDuration.seconds<double>() * kFrameDuration.seconds<double>());
+  sum_squared_frame_duration_secs +=
+      kFreezeDuration.seconds<double>() * kFreezeDuration.seconds<double>();
+  sum_squared_frame_duration_secs +=
+      kPauseDuration.seconds<double>() * kPauseDuration.seconds<double>();
+  const int kExpectedHarmonicFrameRateFps = std::round(
+      kCallDuration.seconds<double>() / sum_squared_frame_duration_secs);
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(
+ kExpectedHarmonicFrameRateFps,
+ metrics::MinSample("WebRTC.Video.Screenshare.HarmonicFrameRate"));
+ } else {
+ EXPECT_METRIC_EQ(kExpectedHarmonicFrameRateFps,
+ metrics::MinSample("WebRTC.Video.HarmonicFrameRate"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, PausesAreIgnored) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ const TimeDelta kPauseDuration = TimeDelta::Seconds(10);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ VideoFrameMetaData meta = MetaData(frame);
+ statistics_proxy_->OnDecodedFrame(meta, absl::nullopt, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ // Add a pause.
+ time_controller_.AdvanceTime(kPauseDuration);
+ statistics_proxy_->OnStreamInactive();
+ // Second playback interval with triple the length.
+ for (int i = 0; i <= kMinRequiredSamples * 3; ++i) {
+ VideoFrameMetaData meta = MetaData(frame);
+ statistics_proxy_->OnDecodedFrame(meta, absl::nullopt, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ // Average of two playback intervals.
+ const TimeDelta kExpectedTimeBetweenFreezes =
+ kInterFrameDelay * kMinRequiredSamples * 2;
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(-1, metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(kExpectedTimeBetweenFreezes.ms(),
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanTimeBetweenFreezesMs"));
+ } else {
+ EXPECT_METRIC_EQ(-1,
+ metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(
+ kExpectedTimeBetweenFreezes.ms(),
+ metrics::MinSample("WebRTC.Video.MeanTimeBetweenFreezesMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, ManyPausesAtTheBeginning) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(33);
+ const TimeDelta kPauseDuration = TimeDelta::Seconds(10);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ statistics_proxy_->OnStreamInactive();
+ time_controller_.AdvanceTime(kPauseDuration);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ // No freezes should be detected, as all long inter-frame delays were
+ // pauses.
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(-1, metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
+ } else {
+ EXPECT_METRIC_EQ(-1,
+ metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, TimeInHdReported) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(20);
+ webrtc::VideoFrame frame_hd = CreateFrame(1280, 720);
+ webrtc::VideoFrame frame_sd = CreateFrame(640, 360);
+
+ // HD frames.
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ VideoFrameMetaData meta = MetaData(frame_hd);
+ statistics_proxy_->OnDecodedFrame(meta, absl::nullopt, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ // SD frames.
+ for (int i = 0; i < 2 * kMinRequiredSamples; ++i) {
+ VideoFrameMetaData meta = MetaData(frame_sd);
+ statistics_proxy_->OnDecodedFrame(meta, absl::nullopt, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ // Extra last frame.
+ statistics_proxy_->OnRenderedFrame(MetaData(frame_sd));
+
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ const int kExpectedTimeInHdPercents = 33;
+ if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(
+ kExpectedTimeInHdPercents,
+ metrics::MinSample("WebRTC.Video.Screenshare.TimeInHdPercentage"));
+ } else {
+ EXPECT_METRIC_EQ(kExpectedTimeInHdPercents,
+ metrics::MinSample("WebRTC.Video.TimeInHdPercentage"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, TimeInBlockyVideoReported) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(20);
+ const int kHighQp = 80;
+ const int kLowQp = 30;
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ // High quality frames.
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ VideoFrameMetaData meta = MetaData(frame);
+ statistics_proxy_->OnDecodedFrame(meta, kLowQp, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ // Blocky frames.
+ for (int i = 0; i < 2 * kMinRequiredSamples; ++i) {
+ VideoFrameMetaData meta = MetaData(frame);
+ statistics_proxy_->OnDecodedFrame(meta, kHighQp, TimeDelta::Zero(),
+ TimeDelta::Zero(), TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(meta);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ // Extra last frame.
+ statistics_proxy_->OnDecodedFrame(frame, kHighQp, TimeDelta::Zero(),
+ content_type_);
+ statistics_proxy_->OnRenderedFrame(MetaData(frame));
+
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+  const int kExpectedTimeInBlockyVideoPercents = 66;
+  if (videocontenttypehelpers::IsScreenshare(content_type_)) {
+    EXPECT_METRIC_EQ(
+        kExpectedTimeInBlockyVideoPercents,
+        metrics::MinSample(
+            "WebRTC.Video.Screenshare.TimeInBlockyVideoPercentage"));
+  } else {
+    EXPECT_METRIC_EQ(
+        kExpectedTimeInBlockyVideoPercents,
+        metrics::MinSample("WebRTC.Video.TimeInBlockyVideoPercentage"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, DownscalesReported) {
+  // To ensure a long enough call duration.
+ const TimeDelta kInterFrameDelay = TimeDelta::Seconds(2);
+
+ webrtc::VideoFrame frame_hd = CreateFrame(1280, 720);
+ webrtc::VideoFrame frame_sd = CreateFrame(640, 360);
+ webrtc::VideoFrame frame_ld = CreateFrame(320, 180);
+
+ // Call once to pass content type.
+ statistics_proxy_->OnDecodedFrame(frame_hd, absl::nullopt, TimeDelta::Zero(),
+ content_type_);
+
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ statistics_proxy_->OnRenderedFrame(MetaData(frame_hd));
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ // Downscale.
+ statistics_proxy_->OnRenderedFrame(MetaData(frame_sd));
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ // Downscale.
+ statistics_proxy_->OnRenderedFrame(MetaData(frame_ld));
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
+ nullptr);
+ const int kExpectedDownscales = 30; // 2 per 4 seconds = 30 per minute.
+ if (!videocontenttypehelpers::IsScreenshare(content_type_)) {
+ EXPECT_METRIC_EQ(kExpectedDownscales,
+ metrics::MinSample(
+ "WebRTC.Video.NumberResolutionDownswitchesPerMinute"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent, DecodeTimeReported) {
+ const TimeDelta kInterFrameDelay = TimeDelta::Millis(20);
+ const int kLowQp = 30;
+ const TimeDelta kDecodeTime = TimeDelta::Millis(7);
+
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(frame, kLowQp, kDecodeTime,
+ content_type_);
+ time_controller_.AdvanceTime(kInterFrameDelay);
+ }
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DecodeTimeInMs", kDecodeTime.ms()));
+}
+
+TEST_P(ReceiveStatisticsProxy2TestWithContent,
+ StatsAreSlicedOnSimulcastAndExperiment) {
+ const uint8_t experiment_id = 1;
+ webrtc::VideoContentType content_type = content_type_;
+ videocontenttypehelpers::SetExperimentId(&content_type, experiment_id);
+ const TimeDelta kInterFrameDelay1 = TimeDelta::Millis(30);
+ const TimeDelta kInterFrameDelay2 = TimeDelta::Millis(50);
+ webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
+
+ videocontenttypehelpers::SetSimulcastId(&content_type, 1);
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ time_controller_.AdvanceTime(kInterFrameDelay1);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type);
+ }
+
+ videocontenttypehelpers::SetSimulcastId(&content_type, 2);
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ time_controller_.AdvanceTime(kInterFrameDelay2);
+ statistics_proxy_->OnDecodedFrame(frame, absl::nullopt, TimeDelta::Zero(),
+ content_type);
+ }
+ FlushAndUpdateHistograms(absl::nullopt, StreamDataCounters(), nullptr);
+
+ if (videocontenttypehelpers::IsScreenshare(content_type)) {
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S0"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(
+ kInterFrameDelay1.ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(
+ kInterFrameDelay2.ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ(
+ ((kInterFrameDelay1 + kInterFrameDelay2) / 2).ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ kInterFrameDelay2.ms(),
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
+ ((kInterFrameDelay1 + kInterFrameDelay2) / 2).ms(),
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.ExperimentGroup0"));
+ } else {
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S1"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(kInterFrameDelay1.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(kInterFrameDelay2.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ((kInterFrameDelay1 + kInterFrameDelay2).ms() / 2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelay2.ms(),
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ((kInterFrameDelay1 + kInterFrameDelay2).ms() / 2,
+ metrics::MinSample(
+ "WebRTC.Video.InterframeDelayInMs.ExperimentGroup0"));
+ }
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/render/BUILD.gn b/third_party/libwebrtc/video/render/BUILD.gn
new file mode 100644
index 0000000000..ff721dc61c
--- /dev/null
+++ b/third_party/libwebrtc/video/render/BUILD.gn
@@ -0,0 +1,51 @@
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("incoming_video_stream") {
+ visibility = [ "*" ]
+
+ sources = [
+ "incoming_video_stream.cc",
+ "incoming_video_stream.h",
+ ]
+
+ deps = [
+ ":video_render_frames",
+ "../../api:sequence_checker",
+ "../../api/task_queue:task_queue",
+ "../../api/units:time_delta",
+ "../../api/video:video_frame",
+ "../../rtc_base:checks",
+ "../../rtc_base:event_tracer",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:race_checker",
+ "../../rtc_base:rtc_task_queue",
+ ]
+
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("video_render_frames") {
+ visibility = [ ":*" ] # Private.
+
+ sources = [
+ "video_render_frames.cc",
+ "video_render_frames.h",
+ ]
+
+ deps = [
+ "../../api/video:video_frame",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:timeutils",
+ "../../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
diff --git a/third_party/libwebrtc/video/render/incoming_video_stream.cc b/third_party/libwebrtc/video/render/incoming_video_stream.cc
new file mode 100644
index 0000000000..e740c47bd0
--- /dev/null
+++ b/third_party/libwebrtc/video/render/incoming_video_stream.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/render/incoming_video_stream.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/trace_event.h"
+#include "video/render/video_render_frames.h"
+
+namespace webrtc {
+
+IncomingVideoStream::IncomingVideoStream(
+ TaskQueueFactory* task_queue_factory,
+ int32_t delay_ms,
+ rtc::VideoSinkInterface<VideoFrame>* callback)
+ : render_buffers_(delay_ms),
+ callback_(callback),
+ incoming_render_queue_(task_queue_factory->CreateTaskQueue(
+ "IncomingVideoStream",
+ TaskQueueFactory::Priority::HIGH)) {}
+
+IncomingVideoStream::~IncomingVideoStream() {
+ RTC_DCHECK(main_thread_checker_.IsCurrent());
+}
+
+void IncomingVideoStream::OnFrame(const VideoFrame& video_frame) {
+ TRACE_EVENT0("webrtc", "IncomingVideoStream::OnFrame");
+ RTC_CHECK_RUNS_SERIALIZED(&decoder_race_checker_);
+ RTC_DCHECK(!incoming_render_queue_.IsCurrent());
+ // TODO(srte): Using video_frame = std::move(video_frame) would move the frame
+ // into the lambda instead of copying it, but it doesn't work unless we change
+ // OnFrame to take its frame argument by value instead of const reference.
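+  // A minimal sketch of that by-value alternative (hypothetical; it would
+  // require changing rtc::VideoSinkInterface):
+  //   void OnFrame(VideoFrame video_frame) override {
+  //     incoming_render_queue_.PostTask(
+  //         [this, frame = std::move(video_frame)]() mutable {
+  //           if (render_buffers_.AddFrame(std::move(frame)) == 1)
+  //             Dequeue();
+  //         });
+  //   }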
+ incoming_render_queue_.PostTask([this, video_frame = video_frame]() mutable {
+ RTC_DCHECK_RUN_ON(&incoming_render_queue_);
+ if (render_buffers_.AddFrame(std::move(video_frame)) == 1)
+ Dequeue();
+ });
+}
+
+void IncomingVideoStream::Dequeue() {
+ TRACE_EVENT0("webrtc", "IncomingVideoStream::Dequeue");
+ RTC_DCHECK_RUN_ON(&incoming_render_queue_);
+ absl::optional<VideoFrame> frame_to_render = render_buffers_.FrameToRender();
+ if (frame_to_render)
+ callback_->OnFrame(*frame_to_render);
+
+ if (render_buffers_.HasPendingFrames()) {
+ uint32_t wait_time = render_buffers_.TimeToNextFrameRelease();
+ incoming_render_queue_.PostDelayedHighPrecisionTask(
+ [this]() { Dequeue(); }, TimeDelta::Millis(wait_time));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/render/incoming_video_stream.h b/third_party/libwebrtc/video/render/incoming_video_stream.h
new file mode 100644
index 0000000000..4873ae7dcb
--- /dev/null
+++ b/third_party/libwebrtc/video/render/incoming_video_stream.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RENDER_INCOMING_VIDEO_STREAM_H_
+#define VIDEO_RENDER_INCOMING_VIDEO_STREAM_H_
+
+#include <stdint.h>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/render/video_render_frames.h"
+
+namespace webrtc {
+
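+// Buffers incoming decoded frames and forwards them to `callback` on a
+// dedicated high-priority render task queue once VideoRenderFrames releases
+// them for rendering.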
+class IncomingVideoStream : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ IncomingVideoStream(TaskQueueFactory* task_queue_factory,
+ int32_t delay_ms,
+ rtc::VideoSinkInterface<VideoFrame>* callback);
+ ~IncomingVideoStream() override;
+
+ private:
+ void OnFrame(const VideoFrame& video_frame) override;
+ void Dequeue();
+
+ SequenceChecker main_thread_checker_;
+ rtc::RaceChecker decoder_race_checker_;
+
+ VideoRenderFrames render_buffers_ RTC_GUARDED_BY(&incoming_render_queue_);
+ rtc::VideoSinkInterface<VideoFrame>* const callback_;
+ rtc::TaskQueue incoming_render_queue_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RENDER_INCOMING_VIDEO_STREAM_H_
diff --git a/third_party/libwebrtc/video/render/incoming_video_stream_gn/moz.build b/third_party/libwebrtc/video/render/incoming_video_stream_gn/moz.build
new file mode 100644
index 0000000000..c8b8614b04
--- /dev/null
+++ b/third_party/libwebrtc/video/render/incoming_video_stream_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/render/incoming_video_stream.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("incoming_video_stream_gn")
diff --git a/third_party/libwebrtc/video/render/video_render_frames.cc b/third_party/libwebrtc/video/render/video_render_frames.cc
new file mode 100644
index 0000000000..ea1362abbb
--- /dev/null
+++ b/third_party/libwebrtc/video/render/video_render_frames.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/render/video_render_frames.h"
+
+#include <type_traits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+// Don't render frames with a render timestamp more than 500 ms in the past.
+const int kOldRenderTimestampMS = 500;
+// Don't render frames with a render timestamp more than 10 s in the future.
+const int kFutureRenderTimestampMS = 10000;
+
+const uint32_t kEventMaxWaitTimeMs = 200;
+const uint32_t kMinRenderDelayMs = 10;
+const uint32_t kMaxRenderDelayMs = 500;
+const size_t kMaxIncomingFramesBeforeLogged = 100;
+
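+// Falls back to the minimum render delay when the configured value lies
+// outside [kMinRenderDelayMs, kMaxRenderDelayMs].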
+uint32_t EnsureValidRenderDelay(uint32_t render_delay) {
+ return (render_delay < kMinRenderDelayMs || render_delay > kMaxRenderDelayMs)
+ ? kMinRenderDelayMs
+ : render_delay;
+}
+} // namespace
+
+VideoRenderFrames::VideoRenderFrames(uint32_t render_delay_ms)
+ : render_delay_ms_(EnsureValidRenderDelay(render_delay_ms)) {}
+
+VideoRenderFrames::~VideoRenderFrames() {
+ frames_dropped_ += incoming_frames_.size();
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DroppedFrames.RenderQueue",
+ frames_dropped_);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.DroppedFrames.RenderQueue "
+ << frames_dropped_;
+}
+
+int32_t VideoRenderFrames::AddFrame(VideoFrame&& new_frame) {
+ const int64_t time_now = rtc::TimeMillis();
+
+  // Drop old frames only when there are other frames in the queue; otherwise,
+  // a really slow system would never render any frames.
+ if (!incoming_frames_.empty() &&
+ new_frame.render_time_ms() + kOldRenderTimestampMS < time_now) {
+ RTC_LOG(LS_WARNING) << "Too old frame, timestamp=" << new_frame.timestamp();
+ ++frames_dropped_;
+ return -1;
+ }
+
+ if (new_frame.render_time_ms() > time_now + kFutureRenderTimestampMS) {
+ RTC_LOG(LS_WARNING) << "Frame too long into the future, timestamp="
+ << new_frame.timestamp();
+ ++frames_dropped_;
+ return -1;
+ }
+
+ if (new_frame.render_time_ms() < last_render_time_ms_) {
+ RTC_LOG(LS_WARNING) << "Frame scheduled out of order, render_time="
+ << new_frame.render_time_ms()
+ << ", latest=" << last_render_time_ms_;
+ // For more details, see bug:
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=7253
+ ++frames_dropped_;
+ return -1;
+ }
+
+ last_render_time_ms_ = new_frame.render_time_ms();
+ incoming_frames_.emplace_back(std::move(new_frame));
+
+ if (incoming_frames_.size() > kMaxIncomingFramesBeforeLogged) {
+ RTC_LOG(LS_WARNING) << "Stored incoming frames: "
+ << incoming_frames_.size();
+ }
+ return static_cast<int32_t>(incoming_frames_.size());
+}
+
+absl::optional<VideoFrame> VideoRenderFrames::FrameToRender() {
+ absl::optional<VideoFrame> render_frame;
+  // Get the newest frame that can be released for rendering; any older frames
+  // that are also due are dropped and counted.
+ while (!incoming_frames_.empty() && TimeToNextFrameRelease() <= 0) {
+ if (render_frame) {
+ ++frames_dropped_;
+ }
+ render_frame = std::move(incoming_frames_.front());
+ incoming_frames_.pop_front();
+ }
+ return render_frame;
+}
+
+uint32_t VideoRenderFrames::TimeToNextFrameRelease() {
+ if (incoming_frames_.empty()) {
+ return kEventMaxWaitTimeMs;
+ }
+ const int64_t time_to_release = incoming_frames_.front().render_time_ms() -
+ render_delay_ms_ - rtc::TimeMillis();
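+  // For example, a frame with render_time_ms() == 1000, render_delay_ms_ ==
+  // 10 and rtc::TimeMillis() == 950 becomes due in 40 ms; a negative result
+  // means the frame is overdue and is released immediately.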
+ return time_to_release < 0 ? 0u : static_cast<uint32_t>(time_to_release);
+}
+
+bool VideoRenderFrames::HasPendingFrames() const {
+ return !incoming_frames_.empty();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/render/video_render_frames.h b/third_party/libwebrtc/video/render/video_render_frames.h
new file mode 100644
index 0000000000..7f48eae496
--- /dev/null
+++ b/third_party/libwebrtc/video/render/video_render_frames.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RENDER_VIDEO_RENDER_FRAMES_H_
+#define VIDEO_RENDER_VIDEO_RENDER_FRAMES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+
+#include "absl/types/optional.h"
+#include "api/video/video_frame.h"
+
+namespace webrtc {
+
+// Delay-aware queue of frames awaiting rendering. Frames that arrive too
+// old, too far in the future, or out of order are dropped.
+class VideoRenderFrames {
+ public:
+ explicit VideoRenderFrames(uint32_t render_delay_ms);
+ VideoRenderFrames(const VideoRenderFrames&) = delete;
+ ~VideoRenderFrames();
+
+  // Adds a frame to the render queue. Returns the new queue size, or -1 if
+  // the frame was dropped.
+ int32_t AddFrame(VideoFrame&& new_frame);
+
+  // Returns the next frame to render, or absl::nullopt if no frame is due yet.
+ absl::optional<VideoFrame> FrameToRender();
+
+  // Returns the number of milliseconds until the next frame is due for
+  // release.
+ uint32_t TimeToNextFrameRelease();
+
+ bool HasPendingFrames() const;
+
+ private:
+  // Sorted list of frames to be rendered, oldest first.
+ std::list<VideoFrame> incoming_frames_;
+
+  // Estimated delay from when a frame is released until it is rendered.
+ const uint32_t render_delay_ms_;
+
+ int64_t last_render_time_ms_ = 0;
+ size_t frames_dropped_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RENDER_VIDEO_RENDER_FRAMES_H_
diff --git a/third_party/libwebrtc/video/render/video_render_frames_gn/moz.build b/third_party/libwebrtc/video/render/video_render_frames_gn/moz.build
new file mode 100644
index 0000000000..951c654ef6
--- /dev/null
+++ b/third_party/libwebrtc/video/render/video_render_frames_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/render/video_render_frames.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_render_frames_gn")
diff --git a/third_party/libwebrtc/video/report_block_stats.cc b/third_party/libwebrtc/video/report_block_stats.cc
new file mode 100644
index 0000000000..bf60364682
--- /dev/null
+++ b/third_party/libwebrtc/video/report_block_stats.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/report_block_stats.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+namespace {
+int FractionLost(uint32_t num_lost_sequence_numbers,
+ uint32_t num_sequence_numbers) {
+ if (num_sequence_numbers == 0) {
+ return 0;
+ }
+ return ((num_lost_sequence_numbers * 255) + (num_sequence_numbers / 2)) /
+ num_sequence_numbers;
+}
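+// Illustrative arithmetic (not part of the upstream source): with 5 packets
+// lost out of 100, FractionLost() = (5 * 255 + 50) / 100 = 13, a Q8 fraction,
+// which FractionLostInPercent() below maps back to 13 * 100 / 255 = 5 percent.
+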
+} // namespace
+
+// Helper class for RTCP statistics.
+ReportBlockStats::ReportBlockStats()
+ : num_sequence_numbers_(0), num_lost_sequence_numbers_(0) {}
+
+ReportBlockStats::~ReportBlockStats() {}
+
+void ReportBlockStats::Store(uint32_t ssrc,
+ int packets_lost,
+ uint32_t extended_highest_sequence_number) {
+ Report report;
+ report.packets_lost = packets_lost;
+ report.extended_highest_sequence_number = extended_highest_sequence_number;
+
+ // Get diff with previous report block.
+ const auto prev_report = prev_reports_.find(ssrc);
+ if (prev_report != prev_reports_.end()) {
+ int seq_num_diff = report.extended_highest_sequence_number -
+ prev_report->second.extended_highest_sequence_number;
+ int cum_loss_diff = report.packets_lost - prev_report->second.packets_lost;
+ if (seq_num_diff >= 0 && cum_loss_diff >= 0) {
+ // Update total number of packets/lost packets.
+ num_sequence_numbers_ += seq_num_diff;
+ num_lost_sequence_numbers_ += cum_loss_diff;
+ }
+ }
+ // Store current report block.
+ prev_reports_[ssrc] = report;
+}
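+// Example of the delta accumulation above (values mirror the unit test): a
+// first report {packets_lost=10, ext_seq=24000} only seeds prev_reports_; a
+// second report {15, 24100} then adds seq_num_diff=100 and cum_loss_diff=5,
+// after which FractionLostInPercent() returns 5.
+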
+
+int ReportBlockStats::FractionLostInPercent() const {
+ if (num_sequence_numbers_ == 0) {
+ return -1;
+ }
+ return FractionLost(num_lost_sequence_numbers_, num_sequence_numbers_) * 100 /
+ 255;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/report_block_stats.h b/third_party/libwebrtc/video/report_block_stats.h
new file mode 100644
index 0000000000..1d1140295c
--- /dev/null
+++ b/third_party/libwebrtc/video/report_block_stats.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_REPORT_BLOCK_STATS_H_
+#define VIDEO_REPORT_BLOCK_STATS_H_
+
+#include <stdint.h>
+
+#include <map>
+
+namespace webrtc {
+
+// TODO(nisse): Usefulness of this class is somewhat unclear. The inputs are
+// cumulative counters, from which we compute deltas, and then accumulate the
+// deltas. May be needed on the send side, to handle wraparound in the short
+// counters received over RTCP, but should not be needed on the receive side
+// where we can use large enough types for all counters we need.
+
+// Helper class for RTCP statistics.
+class ReportBlockStats {
+ public:
+ ReportBlockStats();
+ ~ReportBlockStats();
+
+ // Updates stats and stores report block.
+ void Store(uint32_t ssrc,
+ int packets_lost,
+ uint32_t extended_highest_sequence_number);
+
+ // Returns the total fraction of lost packets (or -1 if less than two report
+ // blocks have been stored).
+ int FractionLostInPercent() const;
+
+ private:
+ // The information from an RTCP report block that we need.
+ struct Report {
+ uint32_t extended_highest_sequence_number;
+ int32_t packets_lost;
+ };
+
+ // The total number of packets/lost packets.
+ uint32_t num_sequence_numbers_;
+ uint32_t num_lost_sequence_numbers_;
+
+ // Map holding the last stored report (mapped by the source SSRC).
+ std::map<uint32_t, Report> prev_reports_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_REPORT_BLOCK_STATS_H_
diff --git a/third_party/libwebrtc/video/report_block_stats_unittest.cc b/third_party/libwebrtc/video/report_block_stats_unittest.cc
new file mode 100644
index 0000000000..bd66e571a0
--- /dev/null
+++ b/third_party/libwebrtc/video/report_block_stats_unittest.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/report_block_stats.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr uint32_t kSsrc1 = 123;
+constexpr uint32_t kSsrc2 = 234;
+
+TEST(ReportBlockStatsTest, StoreAndGetFractionLost) {
+ ReportBlockStats stats;
+ EXPECT_EQ(-1, stats.FractionLostInPercent());
+
+ // First report.
+ stats.Store(kSsrc1, /*packets_lost=*/10,
+ /*extended_highest_sequence_number=*/24'000);
+ EXPECT_EQ(-1, stats.FractionLostInPercent());
+ // fl: 100 * (15-10) / (24100-24000) = 5%
+ stats.Store(kSsrc1, /*packets_lost=*/15,
+ /*extended_highest_sequence_number=*/24'100);
+ EXPECT_EQ(5, stats.FractionLostInPercent());
+ // fl: 100 * (50-10) / (24200-24000) = 20%
+ stats.Store(kSsrc1, /*packets_lost=*/50,
+ /*extended_highest_sequence_number=*/24'200);
+ EXPECT_EQ(20, stats.FractionLostInPercent());
+}
+
+TEST(ReportBlockStatsTest, StoreAndGetFractionLost_TwoSsrcs) {
+ ReportBlockStats stats;
+ EXPECT_EQ(-1, stats.FractionLostInPercent());
+
+ // First report.
+ stats.Store(kSsrc1, /*packets_lost=*/10,
+ /*extended_highest_sequence_number=*/24'000);
+ EXPECT_EQ(-1, stats.FractionLostInPercent());
+ // fl: 100 * (15-10) / (24100-24000) = 5%
+ stats.Store(kSsrc1, /*packets_lost=*/15,
+ /*extended_highest_sequence_number=*/24'100);
+ EXPECT_EQ(5, stats.FractionLostInPercent());
+
+ // First report, kSsrc2.
+ stats.Store(kSsrc2, /*packets_lost=*/111,
+ /*extended_highest_sequence_number=*/8'500);
+ EXPECT_EQ(5, stats.FractionLostInPercent());
+ // fl: 100 * ((15-10) + (136-111)) / ((24100-24000) + (8800-8500)) = 7%
+ stats.Store(kSsrc2, /*packets_lost=*/136,
+ /*extended_highest_sequence_number=*/8'800);
+ EXPECT_EQ(7, stats.FractionLostInPercent());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/rtp_streams_synchronizer2.cc b/third_party/libwebrtc/video/rtp_streams_synchronizer2.cc
new file mode 100644
index 0000000000..0fbb3916cb
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_streams_synchronizer2.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_streams_synchronizer2.h"
+
+#include "absl/types/optional.h"
+#include "call/syncable.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/rtp_to_ntp_estimator.h"
+
+namespace webrtc {
+namespace internal {
+namespace {
+// Time interval for logging stats.
+constexpr int64_t kStatsLogIntervalMs = 10000;
+constexpr TimeDelta kSyncInterval = TimeDelta::Millis(1000);
+
+bool UpdateMeasurements(StreamSynchronization::Measurements* stream,
+ const Syncable::Info& info) {
+ stream->latest_timestamp = info.latest_received_capture_timestamp;
+ stream->latest_receive_time_ms = info.latest_receive_time_ms;
+ return stream->rtp_to_ntp.UpdateMeasurements(
+ NtpTime(info.capture_time_ntp_secs, info.capture_time_ntp_frac),
+ info.capture_time_source_clock) !=
+ RtpToNtpEstimator::kInvalidMeasurement;
+}
+
+} // namespace
+
+RtpStreamsSynchronizer::RtpStreamsSynchronizer(TaskQueueBase* main_queue,
+ Syncable* syncable_video)
+ : task_queue_(main_queue),
+ syncable_video_(syncable_video),
+ last_stats_log_ms_(rtc::TimeMillis()) {
+ RTC_DCHECK(syncable_video);
+}
+
+RtpStreamsSynchronizer::~RtpStreamsSynchronizer() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+ repeating_task_.Stop();
+}
+
+void RtpStreamsSynchronizer::ConfigureSync(Syncable* syncable_audio) {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+
+ // Prevent expensive no-ops.
+ if (syncable_audio == syncable_audio_)
+ return;
+
+ syncable_audio_ = syncable_audio;
+ sync_.reset(nullptr);
+ if (!syncable_audio_) {
+ repeating_task_.Stop();
+ return;
+ }
+
+ sync_.reset(
+ new StreamSynchronization(syncable_video_->id(), syncable_audio_->id()));
+
+ if (repeating_task_.Running())
+ return;
+
+ repeating_task_ =
+ RepeatingTaskHandle::DelayedStart(task_queue_, kSyncInterval, [this]() {
+ UpdateDelay();
+ return kSyncInterval;
+ });
+}
+
+void RtpStreamsSynchronizer::UpdateDelay() {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+
+ if (!syncable_audio_)
+ return;
+
+ RTC_DCHECK(sync_.get());
+
+ bool log_stats = false;
+ const int64_t now_ms = rtc::TimeMillis();
+ if (now_ms - last_stats_log_ms_ > kStatsLogIntervalMs) {
+ last_stats_log_ms_ = now_ms;
+ log_stats = true;
+ }
+
+ int64_t last_audio_receive_time_ms =
+ audio_measurement_.latest_receive_time_ms;
+ absl::optional<Syncable::Info> audio_info = syncable_audio_->GetInfo();
+ if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) {
+ return;
+ }
+
+ if (last_audio_receive_time_ms == audio_measurement_.latest_receive_time_ms) {
+ // No new audio packet has been received since last update.
+ return;
+ }
+
+ int64_t last_video_receive_ms = video_measurement_.latest_receive_time_ms;
+ absl::optional<Syncable::Info> video_info = syncable_video_->GetInfo();
+ if (!video_info || !UpdateMeasurements(&video_measurement_, *video_info)) {
+ return;
+ }
+
+ if (last_video_receive_ms == video_measurement_.latest_receive_time_ms) {
+ // No new video packet has been received since last update.
+ return;
+ }
+
+ int relative_delay_ms;
+ // Calculate how much later or earlier the audio stream is compared to video.
+ if (!sync_->ComputeRelativeDelay(audio_measurement_, video_measurement_,
+ &relative_delay_ms)) {
+ return;
+ }
+
+ if (log_stats) {
+ RTC_LOG(LS_INFO) << "Sync info stats: " << now_ms
+ << ", {ssrc: " << sync_->audio_stream_id() << ", "
+ << "cur_delay_ms: " << audio_info->current_delay_ms
+ << "} {ssrc: " << sync_->video_stream_id() << ", "
+ << "cur_delay_ms: " << video_info->current_delay_ms
+ << "} {relative_delay_ms: " << relative_delay_ms << "} ";
+ }
+
+ TRACE_COUNTER1("webrtc", "SyncCurrentVideoDelay",
+ video_info->current_delay_ms);
+ TRACE_COUNTER1("webrtc", "SyncCurrentAudioDelay",
+ audio_info->current_delay_ms);
+ TRACE_COUNTER1("webrtc", "SyncRelativeDelay", relative_delay_ms);
+
+ int target_audio_delay_ms = 0;
+ int target_video_delay_ms = video_info->current_delay_ms;
+ // Calculate the necessary extra audio delay and desired total video
+ // delay to get the streams in sync.
+ if (!sync_->ComputeDelays(relative_delay_ms, audio_info->current_delay_ms,
+ &target_audio_delay_ms, &target_video_delay_ms)) {
+ return;
+ }
+
+ if (log_stats) {
+ RTC_LOG(LS_INFO) << "Sync delay stats: " << now_ms
+ << ", {ssrc: " << sync_->audio_stream_id() << ", "
+ << "target_delay_ms: " << target_audio_delay_ms
+ << "} {ssrc: " << sync_->video_stream_id() << ", "
+ << "target_delay_ms: " << target_video_delay_ms << "} ";
+ }
+
+ if (!syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms)) {
+ sync_->ReduceAudioDelay();
+ }
+ if (!syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms)) {
+ sync_->ReduceVideoDelay();
+ }
+}
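+// Summary of the flow above: UpdateDelay() runs every kSyncInterval, computes
+// the audio/video relative delay from the latest RTP-to-NTP measurements,
+// asks StreamSynchronization for target playout delays, and applies them via
+// SetMinimumPlayoutDelay(); if a target cannot be applied, the corresponding
+// ReduceAudioDelay()/ReduceVideoDelay() call backs the estimate off.
+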
+
+// TODO(https://bugs.webrtc.org/7065): Move RtpToNtpEstimator out of
+// RtpStreamsSynchronizer and into respective receive stream to always populate
+// the estimated playout timestamp.
+bool RtpStreamsSynchronizer::GetStreamSyncOffsetInMs(
+ uint32_t rtp_timestamp,
+ int64_t render_time_ms,
+ int64_t* video_playout_ntp_ms,
+ int64_t* stream_offset_ms,
+ double* estimated_freq_khz) const {
+ RTC_DCHECK_RUN_ON(&main_checker_);
+
+ if (!syncable_audio_)
+ return false;
+
+ uint32_t audio_rtp_timestamp;
+ int64_t time_ms;
+ if (!syncable_audio_->GetPlayoutRtpTimestamp(&audio_rtp_timestamp,
+ &time_ms)) {
+ return false;
+ }
+
+ NtpTime latest_audio_ntp =
+ audio_measurement_.rtp_to_ntp.Estimate(audio_rtp_timestamp);
+ if (!latest_audio_ntp.Valid()) {
+ return false;
+ }
+ int64_t latest_audio_ntp_ms = latest_audio_ntp.ToMs();
+
+ syncable_audio_->SetEstimatedPlayoutNtpTimestampMs(latest_audio_ntp_ms,
+ time_ms);
+
+ NtpTime latest_video_ntp =
+ video_measurement_.rtp_to_ntp.Estimate(rtp_timestamp);
+ if (!latest_video_ntp.Valid()) {
+ return false;
+ }
+ int64_t latest_video_ntp_ms = latest_video_ntp.ToMs();
+
+ // Current audio ntp.
+ int64_t now_ms = rtc::TimeMillis();
+ latest_audio_ntp_ms += (now_ms - time_ms);
+
+ // Remove video playout delay.
+ int64_t time_to_render_ms = render_time_ms - now_ms;
+ if (time_to_render_ms > 0)
+ latest_video_ntp_ms -= time_to_render_ms;
+
+ *video_playout_ntp_ms = latest_video_ntp_ms;
+ *stream_offset_ms = latest_audio_ntp_ms - latest_video_ntp_ms;
+ *estimated_freq_khz = video_measurement_.rtp_to_ntp.EstimatedFrequencyKhz();
+ return true;
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/rtp_streams_synchronizer2.h b/third_party/libwebrtc/video/rtp_streams_synchronizer2.h
new file mode 100644
index 0000000000..7042b1bd9a
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_streams_synchronizer2.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RTP_STREAMS_SYNCHRONIZER2_H_
+#define VIDEO_RTP_STREAMS_SYNCHRONIZER2_H_
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "video/stream_synchronization.h"
+
+namespace webrtc {
+
+class Syncable;
+
+namespace internal {
+
+// RtpStreamsSynchronizer is responsible for synchronizing audio and video for
+// a given audio receive stream and video receive stream.
+class RtpStreamsSynchronizer {
+ public:
+ RtpStreamsSynchronizer(TaskQueueBase* main_queue, Syncable* syncable_video);
+ ~RtpStreamsSynchronizer();
+
+ void ConfigureSync(Syncable* syncable_audio);
+
+ // Gets the estimated playout NTP timestamp for the video frame with
+ // `rtp_timestamp` and the sync offset between the current played out audio
+ // frame and the video frame. Returns true on success, false otherwise.
+ // The `estimated_freq_khz` is the frequency used in the RTP to NTP timestamp
+ // conversion.
+ bool GetStreamSyncOffsetInMs(uint32_t rtp_timestamp,
+ int64_t render_time_ms,
+ int64_t* video_playout_ntp_ms,
+ int64_t* stream_offset_ms,
+ double* estimated_freq_khz) const;
+
+ private:
+ void UpdateDelay();
+
+ TaskQueueBase* const task_queue_;
+
+ // Used to check if we're running on the main thread/task queue.
+ // The reason we currently don't use RTC_DCHECK_RUN_ON(task_queue_) is because
+ // we might be running on an rtc::Thread implementation of TaskQueue, which
+ // does not consistently set itself as the active TaskQueue.
+ // Instead, we rely on a SequenceChecker for now.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker main_checker_;
+
+ Syncable* const syncable_video_;
+
+ Syncable* syncable_audio_ RTC_GUARDED_BY(main_checker_) = nullptr;
+ std::unique_ptr<StreamSynchronization> sync_ RTC_GUARDED_BY(main_checker_);
+ StreamSynchronization::Measurements audio_measurement_
+ RTC_GUARDED_BY(main_checker_);
+ StreamSynchronization::Measurements video_measurement_
+ RTC_GUARDED_BY(main_checker_);
+ RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(main_checker_);
+ int64_t last_stats_log_ms_ RTC_GUARDED_BY(&main_checker_);
+};
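+
+// Illustrative call pattern (hypothetical caller `sync`, not part of the
+// upstream source):
+//
+//   int64_t playout_ntp_ms, offset_ms;
+//   double freq_khz;
+//   if (sync.GetStreamSyncOffsetInMs(rtp_timestamp, render_time_ms,
+//                                    &playout_ntp_ms, &offset_ms,
+//                                    &freq_khz)) {
+//     // offset_ms is the audio minus video playout NTP time, in ms.
+//   }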
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_RTP_STREAMS_SYNCHRONIZER2_H_
diff --git a/third_party/libwebrtc/video/rtp_video_stream_receiver2.cc b/third_party/libwebrtc/video/rtp_video_stream_receiver2.cc
new file mode 100644
index 0000000000..8055ac0e0f
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_video_stream_receiver2.cc
@@ -0,0 +1,1317 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver2.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/memory/memory.h"
+#include "absl/types/optional.h"
+#include "api/video/video_codec_type.h"
+#include "media/base/media_constants.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/ulpfec_receiver.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+#include "modules/video_coding/nack_requester.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+namespace {
+// TODO(philipel): Change kPacketBufferStartSize back to 32 in M63 see:
+// crbug.com/752886
+constexpr int kPacketBufferStartSize = 512;
+constexpr int kPacketBufferMaxSize = 2048;
+
+constexpr int kMaxPacketAgeToNack = 450;
+
+int PacketBufferMaxSize(const FieldTrialsView& field_trials) {
+ // The field trial group must specify a positive power of 2, which is then
+ // used as the size. Any other value results in the default size being used.
+ const std::string group_name =
+ field_trials.Lookup("WebRTC-PacketBufferMaxSize");
+ int packet_buffer_max_size = kPacketBufferMaxSize;
+ if (!group_name.empty() &&
+ (sscanf(group_name.c_str(), "%d", &packet_buffer_max_size) != 1 ||
+ packet_buffer_max_size <= 0 ||
+ // Verify that the number is a positive power of 2.
+ (packet_buffer_max_size & (packet_buffer_max_size - 1)) != 0)) {
+ RTC_LOG(LS_WARNING) << "Invalid packet buffer max size: " << group_name;
+ packet_buffer_max_size = kPacketBufferMaxSize;
+ }
+ return packet_buffer_max_size;
+}
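+// Example (assumed field trial strings): a group value of "1024" yields a
+// 1024-packet buffer, while "1000" is rejected (not a power of 2) and falls
+// back to kPacketBufferMaxSize with a warning.
+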
+
+std::unique_ptr<ModuleRtpRtcpImpl2> CreateRtpRtcpModule(
+ Clock* clock,
+ ReceiveStatistics* receive_statistics,
+ Transport* outgoing_transport,
+ RtcpRttStats* rtt_stats,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ RtcpCnameCallback* rtcp_cname_callback,
+ bool non_sender_rtt_measurement,
+ uint32_t local_ssrc,
+ RtcEventLog* rtc_event_log,
+ RtcpEventObserver* rtcp_event_observer) {
+ RtpRtcpInterface::Configuration configuration;
+ configuration.clock = clock;
+ configuration.audio = false;
+ configuration.receiver_only = true;
+ configuration.receive_statistics = receive_statistics;
+ configuration.outgoing_transport = outgoing_transport;
+ configuration.rtt_stats = rtt_stats;
+ configuration.rtcp_packet_type_counter_observer =
+ rtcp_packet_type_counter_observer;
+ configuration.rtcp_cname_callback = rtcp_cname_callback;
+ configuration.local_media_ssrc = local_ssrc;
+ configuration.rtcp_event_observer = rtcp_event_observer;
+ configuration.non_sender_rtt_measurement = non_sender_rtt_measurement;
+ configuration.event_log = rtc_event_log;
+
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp =
+ ModuleRtpRtcpImpl2::Create(configuration);
+ rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+
+ return rtp_rtcp;
+}
+
+std::unique_ptr<NackRequester> MaybeConstructNackModule(
+ TaskQueueBase* current_queue,
+ NackPeriodicProcessor* nack_periodic_processor,
+ const NackConfig& nack,
+ Clock* clock,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ const FieldTrialsView& field_trials) {
+ if (nack.rtp_history_ms == 0)
+ return nullptr;
+
+ // TODO(bugs.webrtc.org/12420): pass rtp_history_ms to the nack module.
+ return std::make_unique<NackRequester>(current_queue, nack_periodic_processor,
+ clock, nack_sender,
+ keyframe_request_sender, field_trials);
+}
+
+std::unique_ptr<UlpfecReceiver> MaybeConstructUlpfecReceiver(
+ uint32_t remote_ssrc,
+ int red_payload_type,
+ int ulpfec_payload_type,
+ RecoveredPacketReceiver* callback,
+ Clock* clock) {
+ RTC_DCHECK_GE(red_payload_type, -1);
+ RTC_DCHECK_GE(ulpfec_payload_type, -1);
+ if (red_payload_type == -1)
+ return nullptr;
+
+ // TODO(tommi, brandtr): Consider including this check too once
+ // `UlpfecReceiver` has been updated to not consider both red and ulpfec
+ // payload ids.
+ // if (ulpfec_payload_type == -1)
+ // return nullptr;
+
+ return std::make_unique<UlpfecReceiver>(remote_ssrc, ulpfec_payload_type,
+ callback, clock);
+}
+
+static const int kPacketLogIntervalMs = 10000;
+
+} // namespace
+
+RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RtcpFeedbackBuffer(
+ KeyFrameRequestSender* key_frame_request_sender,
+ NackSender* nack_sender,
+ LossNotificationSender* loss_notification_sender)
+ : key_frame_request_sender_(key_frame_request_sender),
+ nack_sender_(nack_sender),
+ loss_notification_sender_(loss_notification_sender),
+ request_key_frame_(false) {
+ RTC_DCHECK(key_frame_request_sender_);
+ RTC_DCHECK(nack_sender_);
+ RTC_DCHECK(loss_notification_sender_);
+ packet_sequence_checker_.Detach();
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RequestKeyFrame() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ request_key_frame_ = true;
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendNack(
+ const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK(!sequence_numbers.empty());
+ nack_sequence_numbers_.insert(nack_sequence_numbers_.end(),
+ sequence_numbers.cbegin(),
+ sequence_numbers.cend());
+ if (!buffering_allowed) {
+ // Note that while *buffering* is not allowed, *batching* is, meaning that
+ // previously buffered messages may be sent along with the current message.
+ SendBufferedRtcpFeedback();
+ }
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification(
+ uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK(buffering_allowed);
+ RTC_DCHECK(!lntf_state_)
+ << "SendLossNotification() called twice in a row with no call to "
+ "SendBufferedRtcpFeedback() in between.";
+ lntf_state_ = absl::make_optional<LossNotificationState>(
+ last_decoded_seq_num, last_received_seq_num, decodability_flag);
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_sequence_numbers;
+ absl::optional<LossNotificationState> lntf_state;
+
+ std::swap(request_key_frame, request_key_frame_);
+ std::swap(nack_sequence_numbers, nack_sequence_numbers_);
+ std::swap(lntf_state, lntf_state_);
+
+ if (lntf_state) {
+ // If either a NACK or a key frame request is about to be sent, buffer
+ // the LNTF and let that message trigger the compound feedback packet.
+ // Otherwise, send the LNTF out immediately.
+ const bool buffering_allowed =
+ request_key_frame || !nack_sequence_numbers.empty();
+
+ loss_notification_sender_->SendLossNotification(
+ lntf_state->last_decoded_seq_num, lntf_state->last_received_seq_num,
+ lntf_state->decodability_flag, buffering_allowed);
+ }
+
+ if (request_key_frame) {
+ key_frame_request_sender_->RequestKeyFrame();
+ } else if (!nack_sequence_numbers.empty()) {
+ nack_sender_->SendNack(nack_sequence_numbers, true);
+ }
+}
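+// Note on the batching above: with a buffered LNTF and a pending key frame
+// request, the LNTF goes out with buffering allowed and the key frame request
+// flushes the compound packet; buffered NACKs are only sent when no key frame
+// request is pending.
+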
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::ClearLossNotificationState() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ lntf_state_.reset();
+}
+
+RtpVideoStreamReceiver2::RtpVideoStreamReceiver2(
+ TaskQueueBase* current_queue,
+ Clock* clock,
+ Transport* transport,
+ RtcpRttStats* rtt_stats,
+ PacketRouter* packet_router,
+ const VideoReceiveStreamInterface::Config* config,
+ ReceiveStatistics* rtp_receive_statistics,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ RtcpCnameCallback* rtcp_cname_callback,
+ NackPeriodicProcessor* nack_periodic_processor,
+ VCMReceiveStatisticsCallback* vcm_receive_statistics,
+ OnCompleteFrameCallback* complete_frame_callback,
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ const FieldTrialsView& field_trials,
+ RtcEventLog* event_log)
+ : field_trials_(field_trials),
+ worker_queue_(current_queue),
+ clock_(clock),
+ config_(*config),
+ packet_router_(packet_router),
+ ntp_estimator_(clock),
+ rtp_header_extensions_(config_.rtp.extensions),
+ forced_playout_delay_max_ms_("max_ms", absl::nullopt),
+ forced_playout_delay_min_ms_("min_ms", absl::nullopt),
+ rtp_receive_statistics_(rtp_receive_statistics),
+ ulpfec_receiver_(
+ MaybeConstructUlpfecReceiver(config->rtp.remote_ssrc,
+ config->rtp.red_payload_type,
+ config->rtp.ulpfec_payload_type,
+ this,
+ clock_)),
+ red_payload_type_(config_.rtp.red_payload_type),
+ packet_sink_(config->rtp.packet_sink_),
+ receiving_(false),
+ last_packet_log_ms_(-1),
+ rtp_rtcp_(CreateRtpRtcpModule(
+ clock,
+ rtp_receive_statistics_,
+ transport,
+ rtt_stats,
+ rtcp_packet_type_counter_observer,
+ rtcp_cname_callback,
+ config_.rtp.rtcp_xr.receiver_reference_time_report,
+ config_.rtp.local_ssrc,
+ event_log,
+ config_.rtp.rtcp_event_observer)),
+ nack_periodic_processor_(nack_periodic_processor),
+ complete_frame_callback_(complete_frame_callback),
+ keyframe_request_method_(config_.rtp.keyframe_method),
+ // TODO(bugs.webrtc.org/10336): Let `rtcp_feedback_buffer_` communicate
+ // directly with `rtp_rtcp_`.
+ rtcp_feedback_buffer_(this, this, this),
+ nack_module_(MaybeConstructNackModule(current_queue,
+ nack_periodic_processor,
+ config_.rtp.nack,
+ clock_,
+ &rtcp_feedback_buffer_,
+ &rtcp_feedback_buffer_,
+ field_trials_)),
+ vcm_receive_statistics_(vcm_receive_statistics),
+ packet_buffer_(kPacketBufferStartSize,
+ PacketBufferMaxSize(field_trials_)),
+ reference_finder_(std::make_unique<RtpFrameReferenceFinder>()),
+ has_received_frame_(false),
+ frames_decryptable_(false),
+ absolute_capture_time_interpolator_(clock) {
+ packet_sequence_checker_.Detach();
+ if (packet_router_)
+ packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), config_.rtp.remb);
+
+ RTC_DCHECK(config_.rtp.rtcp_mode != RtcpMode::kOff)
+ << "A stream should not be configured with RTCP disabled. This value is "
+ "reserved for internal usage.";
+ // TODO(pbos): What's an appropriate local_ssrc for receive-only streams?
+ RTC_DCHECK(config_.rtp.local_ssrc != 0);
+ RTC_DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);
+
+ rtp_rtcp_->SetRTCPStatus(config_.rtp.rtcp_mode);
+ rtp_rtcp_->SetRemoteSSRC(config_.rtp.remote_ssrc);
+
+ if (config_.rtp.nack.rtp_history_ms > 0) {
+ rtp_receive_statistics_->SetMaxReorderingThreshold(config_.rtp.remote_ssrc,
+ kMaxPacketAgeToNack);
+ }
+ ParseFieldTrial(
+ {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_},
+ field_trials_.Lookup("WebRTC-ForcePlayoutDelay"));
+
+ if (config_.rtp.lntf.enabled) {
+ loss_notification_controller_ =
+ std::make_unique<LossNotificationController>(&rtcp_feedback_buffer_,
+ &rtcp_feedback_buffer_);
+ }
+
+ // Only construct the encrypted receiver if frame encryption is enabled.
+ if (config_.crypto_options.sframe.require_frame_encryption) {
+ buffered_frame_decryptor_ =
+ std::make_unique<BufferedFrameDecryptor>(this, this, field_trials_);
+ if (frame_decryptor != nullptr) {
+ buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
+ }
+ }
+
+ if (frame_transformer) {
+ frame_transformer_delegate_ =
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ this, std::move(frame_transformer), rtc::Thread::Current(),
+ config_.rtp.remote_ssrc);
+ frame_transformer_delegate_->Init();
+ }
+}
+
+RtpVideoStreamReceiver2::~RtpVideoStreamReceiver2() {
+ if (packet_router_)
+ packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get());
+ ulpfec_receiver_.reset();
+ if (frame_transformer_delegate_)
+ frame_transformer_delegate_->Reset();
+}
+
+void RtpVideoStreamReceiver2::AddReceiveCodec(
+ uint8_t payload_type,
+ VideoCodecType video_codec,
+ const std::map<std::string, std::string>& codec_params,
+ bool raw_payload) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (codec_params.count(cricket::kH264FmtpSpsPpsIdrInKeyframe) > 0 ||
+ field_trials_.IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) {
+ packet_buffer_.ForceSpsPpsIdrIsH264Keyframe();
+ }
+ payload_type_map_.emplace(
+ payload_type, raw_payload ? std::make_unique<VideoRtpDepacketizerRaw>()
+ : CreateVideoRtpDepacketizer(video_codec));
+ pt_codec_params_.emplace(payload_type, codec_params);
+}
+
+void RtpVideoStreamReceiver2::RemoveReceiveCodec(uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ auto codec_params_it = pt_codec_params_.find(payload_type);
+ if (codec_params_it == pt_codec_params_.end())
+ return;
+
+ const bool sps_pps_idr_in_key_frame =
+ codec_params_it->second.count(cricket::kH264FmtpSpsPpsIdrInKeyframe) > 0;
+
+ pt_codec_params_.erase(codec_params_it);
+ payload_type_map_.erase(payload_type);
+
+ if (sps_pps_idr_in_key_frame) {
+ bool reset_setting = true;
+ for (auto& [unused, codec_params] : pt_codec_params_) {
+ if (codec_params.count(cricket::kH264FmtpSpsPpsIdrInKeyframe) > 0) {
+ reset_setting = false;
+ break;
+ }
+ }
+
+ if (reset_setting) {
+ packet_buffer_.ResetSpsPpsIdrIsH264Keyframe();
+ }
+ }
+}
+
+void RtpVideoStreamReceiver2::RemoveReceiveCodecs() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ pt_codec_params_.clear();
+ payload_type_map_.clear();
+ packet_buffer_.ResetSpsPpsIdrIsH264Keyframe();
+}
+
+absl::optional<Syncable::Info> RtpVideoStreamReceiver2::GetSyncInfo() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ Syncable::Info info;
+ if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs,
+ &info.capture_time_ntp_frac,
+ /*rtcp_arrival_time_secs=*/nullptr,
+ /*rtcp_arrival_time_frac=*/nullptr,
+ &info.capture_time_source_clock) != 0) {
+ return absl::nullopt;
+ }
+
+ if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_) {
+ return absl::nullopt;
+ }
+ info.latest_received_capture_timestamp = *last_received_rtp_timestamp_;
+ info.latest_receive_time_ms = last_received_rtp_system_time_->ms();
+
+ // Leaves info.current_delay_ms uninitialized.
+ return info;
+}
+
+RtpVideoStreamReceiver2::ParseGenericDependenciesResult
+RtpVideoStreamReceiver2::ParseGenericDependenciesExtension(
+ const RtpPacketReceived& rtp_packet,
+ RTPVideoHeader* video_header) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (rtp_packet.HasExtension<RtpDependencyDescriptorExtension>()) {
+ webrtc::DependencyDescriptor dependency_descriptor;
+ if (!rtp_packet.GetExtension<RtpDependencyDescriptorExtension>(
+ video_structure_.get(), &dependency_descriptor)) {
+ // The descriptor is present but failed to parse. Either it is invalid, or
+ // the packet is too old (arrived after the relevant video_structure_
+ // changed) or too new (arrived before the relevant video_structure_).
+ // Drop such a packet to be on the safe side.
+ // TODO(bugs.webrtc.org/10342): Stash too new packet.
+ Timestamp now = clock_->CurrentTime();
+ if (now - last_logged_failed_to_parse_dd_ > TimeDelta::Seconds(1)) {
+ last_logged_failed_to_parse_dd_ = now;
+ RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
+ << " Failed to parse dependency descriptor.";
+ }
+ return kDropPacket;
+ }
+ if (dependency_descriptor.attached_structure != nullptr &&
+ !dependency_descriptor.first_packet_in_frame) {
+ RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
+ << "Invalid dependency descriptor: structure "
+ "attached to non first packet of a frame.";
+ return kDropPacket;
+ }
+ video_header->is_first_packet_in_frame =
+ dependency_descriptor.first_packet_in_frame;
+ video_header->is_last_packet_in_frame =
+ dependency_descriptor.last_packet_in_frame;
+
+ int64_t frame_id =
+ frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number);
+ auto& generic_descriptor_info = video_header->generic.emplace();
+ generic_descriptor_info.frame_id = frame_id;
+ generic_descriptor_info.spatial_index =
+ dependency_descriptor.frame_dependencies.spatial_id;
+ generic_descriptor_info.temporal_index =
+ dependency_descriptor.frame_dependencies.temporal_id;
+ for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) {
+ generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
+ }
+ generic_descriptor_info.decode_target_indications =
+ dependency_descriptor.frame_dependencies.decode_target_indications;
+ if (dependency_descriptor.resolution) {
+ video_header->width = dependency_descriptor.resolution->Width();
+ video_header->height = dependency_descriptor.resolution->Height();
+ }
+
+ // The FrameDependencyStructure is sent in the dependency descriptor of the
+ // first packet of a key frame and is required to parse the dependency
+ // descriptor in all following packets until the next key frame.
+ // Save it if there is a (potentially) new structure.
+ if (dependency_descriptor.attached_structure) {
+ RTC_DCHECK(dependency_descriptor.first_packet_in_frame);
+ if (video_structure_frame_id_ > frame_id) {
+ RTC_LOG(LS_WARNING)
+ << "Arrived key frame with id " << frame_id << " and structure id "
+ << dependency_descriptor.attached_structure->structure_id
+ << " is older than the latest received key frame with id "
+ << *video_structure_frame_id_ << " and structure id "
+ << video_structure_->structure_id;
+ return kDropPacket;
+ }
+ video_structure_ = std::move(dependency_descriptor.attached_structure);
+ video_structure_frame_id_ = frame_id;
+ video_header->frame_type = VideoFrameType::kVideoFrameKey;
+ } else {
+ video_header->frame_type = VideoFrameType::kVideoFrameDelta;
+ }
+ return kHasGenericDescriptor;
+ }
+
+ RtpGenericFrameDescriptor generic_frame_descriptor;
+ if (!rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
+ &generic_frame_descriptor)) {
+ return kNoGenericDescriptor;
+ }
+
+ video_header->is_first_packet_in_frame =
+ generic_frame_descriptor.FirstPacketInSubFrame();
+ video_header->is_last_packet_in_frame =
+ generic_frame_descriptor.LastPacketInSubFrame();
+
+ if (generic_frame_descriptor.FirstPacketInSubFrame()) {
+ video_header->frame_type =
+ generic_frame_descriptor.FrameDependenciesDiffs().empty()
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+
+ auto& generic_descriptor_info = video_header->generic.emplace();
+ int64_t frame_id =
+ frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId());
+ generic_descriptor_info.frame_id = frame_id;
+ generic_descriptor_info.spatial_index =
+ generic_frame_descriptor.SpatialLayer();
+ generic_descriptor_info.temporal_index =
+ generic_frame_descriptor.TemporalLayer();
+ for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) {
+ generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
+ }
+ }
+ video_header->width = generic_frame_descriptor.Width();
+ video_header->height = generic_frame_descriptor.Height();
+ return kHasGenericDescriptor;
+}
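+// Worked example of the dependency unwrapping above (illustrative values): a
+// descriptor carrying frame id 100 with frame diffs {1, 3} yields
+// generic_descriptor_info.dependencies == {99, 97}.
+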
+
+void RtpVideoStreamReceiver2::OnReceivedPayloadData(
+ rtc::CopyOnWriteBuffer codec_payload,
+ const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ auto packet =
+ std::make_unique<video_coding::PacketBuffer::Packet>(rtp_packet, video);
+
+ int64_t unwrapped_rtp_seq_num =
+ rtp_seq_num_unwrapper_.Unwrap(rtp_packet.SequenceNumber());
+
+ RtpPacketInfo& packet_info =
+ packet_infos_
+ .emplace(unwrapped_rtp_seq_num,
+ RtpPacketInfo(rtp_packet.Ssrc(), rtp_packet.Csrcs(),
+ rtp_packet.Timestamp(),
+ /*receive_time_ms=*/clock_->CurrentTime()))
+ .first->second;
+
+ // Try to extrapolate absolute capture time if it is missing.
+ packet_info.set_absolute_capture_time(
+ absolute_capture_time_interpolator_.OnReceivePacket(
+ AbsoluteCaptureTimeInterpolator::GetSource(packet_info.ssrc(),
+ packet_info.csrcs()),
+ packet_info.rtp_timestamp(),
+ // Assume the frequency is the same for all video frames.
+ kVideoPayloadTypeFrequency,
+ rtp_packet.GetExtension<AbsoluteCaptureTimeExtension>()));
+
+ RTPVideoHeader& video_header = packet->video_header;
+ video_header.rotation = kVideoRotation_0;
+ video_header.content_type = VideoContentType::UNSPECIFIED;
+ video_header.video_timing.flags = VideoSendTiming::kInvalid;
+ video_header.is_last_packet_in_frame |= rtp_packet.Marker();
+
+ rtp_packet.GetExtension<VideoOrientation>(&video_header.rotation);
+ rtp_packet.GetExtension<VideoContentTypeExtension>(
+ &video_header.content_type);
+ rtp_packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
+ if (forced_playout_delay_max_ms_ && forced_playout_delay_min_ms_) {
+ video_header.playout_delay.max_ms = *forced_playout_delay_max_ms_;
+ video_header.playout_delay.min_ms = *forced_playout_delay_min_ms_;
+ } else {
+ rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
+ }
+
+ ParseGenericDependenciesResult generic_descriptor_state =
+ ParseGenericDependenciesExtension(rtp_packet, &video_header);
+
+ if (!rtp_packet.recovered()) {
+ UpdatePacketReceiveTimestamps(
+ rtp_packet, video_header.frame_type == VideoFrameType::kVideoFrameKey);
+ }
+
+ if (generic_descriptor_state == kDropPacket) {
+ Timestamp now = clock_->CurrentTime();
+ if (video_structure_ == nullptr &&
+ next_keyframe_request_for_missing_video_structure_ < now) {
+ // No video structure received yet, most likely part of the initial
+ // keyframe was lost.
+ RequestKeyFrame();
+ next_keyframe_request_for_missing_video_structure_ =
+ now + TimeDelta::Seconds(1);
+ }
+ return;
+ }
+
+ // Color space should only be transmitted in the last packet of a frame;
+ // therefore, ignore it in other packets so that last_color_space_ is not
+ // reset by mistake.
+ if (video_header.is_last_packet_in_frame) {
+ video_header.color_space = rtp_packet.GetExtension<ColorSpaceExtension>();
+ if (video_header.color_space ||
+ video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ // Store color space since it's only transmitted when changed or for key
+ // frames. Color space will be cleared if a key frame is transmitted
+ // without color space information.
+ last_color_space_ = video_header.color_space;
+ } else if (last_color_space_) {
+ video_header.color_space = last_color_space_;
+ }
+ }
+ video_header.video_frame_tracking_id =
+ rtp_packet.GetExtension<VideoFrameTrackingIdExtension>();
+
+ if (loss_notification_controller_) {
+ if (rtp_packet.recovered()) {
+ // TODO(bugs.webrtc.org/10336): Implement support for reordering.
+ RTC_LOG(LS_INFO)
+ << "LossNotificationController does not support reordering.";
+ } else if (generic_descriptor_state == kNoGenericDescriptor) {
+ RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
+ "frame descriptor, but it is missing.";
+ } else {
+ if (video_header.is_first_packet_in_frame) {
+ RTC_DCHECK(video_header.generic);
+ LossNotificationController::FrameDetails frame;
+ frame.is_keyframe =
+ video_header.frame_type == VideoFrameType::kVideoFrameKey;
+ frame.frame_id = video_header.generic->frame_id;
+ frame.frame_dependencies = video_header.generic->dependencies;
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), &frame);
+ } else {
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), nullptr);
+ }
+ }
+ }
+
+ if (nack_module_) {
+ const bool is_keyframe =
+ video_header.is_first_packet_in_frame &&
+ video_header.frame_type == VideoFrameType::kVideoFrameKey;
+
+ packet->times_nacked = nack_module_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
+ } else {
+ packet->times_nacked = -1;
+ }
+
+ if (codec_payload.size() == 0) {
+ NotifyReceiverOfEmptyPacket(packet->seq_num);
+ rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
+ return;
+ }
+
+ if (packet->codec() == kVideoCodecH264) {
+ // Only once we start to receive packets do we know which payload type
+ // will be used. When the payload type is known, insert the correct
+ // SPS/PPS into the tracker.
+ if (packet->payload_type != last_payload_type_) {
+ last_payload_type_ = packet->payload_type;
+ InsertSpsPpsIntoTracker(packet->payload_type);
+ }
+
+ video_coding::H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(
+ rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
+ &packet->video_header);
+
+ switch (fixed.action) {
+ case video_coding::H264SpsPpsTracker::kRequestKeyframe:
+ rtcp_feedback_buffer_.RequestKeyFrame();
+ rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
+ [[fallthrough]];
+ case video_coding::H264SpsPpsTracker::kDrop:
+ return;
+ case video_coding::H264SpsPpsTracker::kInsert:
+ packet->video_payload = std::move(fixed.bitstream);
+ break;
+ }
+
+ } else {
+ packet->video_payload = std::move(codec_payload);
+ }
+
+ rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
+ frame_counter_.Add(packet->timestamp);
+ OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
+}
+
+void RtpVideoStreamReceiver2::OnRecoveredPacket(
+ const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (packet.PayloadType() == red_payload_type_) {
+ RTC_LOG(LS_WARNING) << "Discarding recovered packet with RED encapsulation";
+ return;
+ }
+ ReceivePacket(packet);
+}
+
+// This method handles both regular RTP packets and packets recovered
+// via FlexFEC.
+void RtpVideoStreamReceiver2::OnRtpPacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ if (!receiving_)
+ return;
+
+ ReceivePacket(packet);
+
+ // Update receive statistics after ReceivePacket.
+ // Receive statistics will be reset if the payload type changes (make sure
+ // that the first packet is included in the stats).
+ if (!packet.recovered()) {
+ rtp_receive_statistics_->OnRtpPacket(packet);
+ }
+
+ if (packet_sink_) {
+ packet_sink_->OnRtpPacket(packet);
+ }
+}
+
+void RtpVideoStreamReceiver2::RequestKeyFrame() {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ TRACE_EVENT2("webrtc", "RtpVideoStreamReceiver2::RequestKeyFrame",
+ "remote_ssrc", config_.rtp.remote_ssrc, "method",
+ keyframe_request_method_ == KeyFrameReqMethod::kPliRtcp ? "PLI"
+ : keyframe_request_method_ == KeyFrameReqMethod::kFirRtcp ? "FIR"
+ : keyframe_request_method_ == KeyFrameReqMethod::kNone ? "None"
+ : "Other");
+ // TODO(bugs.webrtc.org/10336): Allow the sender to ignore key frame requests
+ // issued by anything other than the LossNotificationController if it (the
+ // sender) is relying on LNTF alone.
+ if (keyframe_request_method_ == KeyFrameReqMethod::kPliRtcp) {
+ rtp_rtcp_->SendPictureLossIndication();
+ } else if (keyframe_request_method_ == KeyFrameReqMethod::kFirRtcp) {
+ rtp_rtcp_->SendFullIntraRequest();
+ }
+}
+
+void RtpVideoStreamReceiver2::SendNack(
+ const std::vector<uint16_t>& sequence_numbers,
+ bool /*buffering_allowed*/) {
+ rtp_rtcp_->SendNack(sequence_numbers);
+}
+
+void RtpVideoStreamReceiver2::SendLossNotification(
+ uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ RTC_DCHECK(config_.rtp.lntf.enabled);
+ rtp_rtcp_->SendLossNotification(last_decoded_seq_num, last_received_seq_num,
+ decodability_flag, buffering_allowed);
+}
+
+bool RtpVideoStreamReceiver2::IsDecryptable() const {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ return frames_decryptable_;
+}
+
+void RtpVideoStreamReceiver2::OnInsertedPacket(
+ video_coding::PacketBuffer::InsertResult result) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ video_coding::PacketBuffer::Packet* first_packet = nullptr;
+ int max_nack_count;
+ int64_t min_recv_time;
+ int64_t max_recv_time;
+ std::vector<rtc::ArrayView<const uint8_t>> payloads;
+ RtpPacketInfos::vector_type packet_infos;
+
+ bool frame_boundary = true;
+ for (auto& packet : result.packets) {
+ // PacketBuffer promises frame boundaries are correctly set on each
+ // packet. Document that assumption with the DCHECKs.
+ RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
+ int64_t unwrapped_rtp_seq_num =
+ rtp_seq_num_unwrapper_.Unwrap(packet->seq_num);
+ RTC_DCHECK_GT(packet_infos_.count(unwrapped_rtp_seq_num), 0);
+ RtpPacketInfo& packet_info = packet_infos_[unwrapped_rtp_seq_num];
+ if (packet->is_first_packet_in_frame()) {
+ first_packet = packet.get();
+ max_nack_count = packet->times_nacked;
+ min_recv_time = packet_info.receive_time().ms();
+ max_recv_time = packet_info.receive_time().ms();
+ } else {
+ max_nack_count = std::max(max_nack_count, packet->times_nacked);
+ min_recv_time = std::min(min_recv_time, packet_info.receive_time().ms());
+ max_recv_time = std::max(max_recv_time, packet_info.receive_time().ms());
+ }
+ payloads.emplace_back(packet->video_payload);
+ packet_infos.push_back(packet_info);
+
+ frame_boundary = packet->is_last_packet_in_frame();
+ if (packet->is_last_packet_in_frame()) {
+ auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
+ RTC_CHECK(depacketizer_it != payload_type_map_.end());
+
+ rtc::scoped_refptr<EncodedImageBuffer> bitstream =
+ depacketizer_it->second->AssembleFrame(payloads);
+ if (!bitstream) {
+ // Failed to assemble a frame. Discard and continue.
+ continue;
+ }
+
+ const video_coding::PacketBuffer::Packet& last_packet = *packet;
+ OnAssembledFrame(std::make_unique<RtpFrameObject>(
+ first_packet->seq_num, //
+ last_packet.seq_num, //
+ last_packet.marker_bit, //
+ max_nack_count, //
+ min_recv_time, //
+ max_recv_time, //
+ first_packet->timestamp, //
+ ntp_estimator_.Estimate(first_packet->timestamp), //
+ last_packet.video_header.video_timing, //
+ first_packet->payload_type, //
+ first_packet->codec(), //
+ last_packet.video_header.rotation, //
+ last_packet.video_header.content_type, //
+ first_packet->video_header, //
+ last_packet.video_header.color_space, //
+ RtpPacketInfos(std::move(packet_infos)), //
+ std::move(bitstream)));
+ payloads.clear();
+ packet_infos.clear();
+ }
+ }
+ RTC_DCHECK(frame_boundary);
+ if (result.buffer_cleared) {
+ last_received_rtp_system_time_.reset();
+ last_received_keyframe_rtp_system_time_.reset();
+ last_received_keyframe_rtp_timestamp_.reset();
+ packet_infos_.clear();
+ RequestKeyFrame();
+ }
+}
+
+void RtpVideoStreamReceiver2::OnAssembledFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK(frame);
+
+ const absl::optional<RTPVideoHeader::GenericDescriptorInfo>& descriptor =
+ frame->GetRtpVideoHeader().generic;
+
+ if (loss_notification_controller_ && descriptor) {
+ loss_notification_controller_->OnAssembledFrame(
+ frame->first_seq_num(), descriptor->frame_id,
+ absl::c_linear_search(descriptor->decode_target_indications,
+ DecodeTargetIndication::kDiscardable),
+ descriptor->dependencies);
+ }
+
+ // Frames that arrive before a key frame will not be decodable.
+ // In that case, request a key frame ASAP.
+ if (!has_received_frame_) {
+ if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
+ // `loss_notification_controller_`, if present, would have already
+ // requested a key frame when the first packet for the non-key frame
+ // had arrived, so no need to replicate the request.
+ if (!loss_notification_controller_) {
+ RequestKeyFrame();
+ }
+ }
+ has_received_frame_ = true;
+ }
+
+ // Reset `reference_finder_` if `frame` is new and the codec has changed.
+ if (current_codec_) {
+ bool frame_is_newer =
+ AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);
+
+ if (frame->codec_type() != current_codec_) {
+ if (frame_is_newer) {
+ // When we reset the `reference_finder_` we don't want new picture ids
+ // to overlap with old picture ids. To ensure that doesn't happen we
+ // start from the `last_completed_picture_id_` and add an offset in case
+ // of reordering.
+ reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
+ last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());
+ current_codec_ = frame->codec_type();
+ } else {
+ // Old frame from before the codec switch, discard it.
+ return;
+ }
+ }
+
+ if (frame_is_newer) {
+ last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
+ }
+ } else {
+ current_codec_ = frame->codec_type();
+ last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
+ }
+
+ if (buffered_frame_decryptor_ != nullptr) {
+ buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
+ } else if (frame_transformer_delegate_) {
+ frame_transformer_delegate_->TransformFrame(std::move(frame));
+ } else {
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+ }
+}
+
+void RtpVideoStreamReceiver2::OnCompleteFrames(
+ RtpFrameReferenceFinder::ReturnVector frames) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ for (auto& frame : frames) {
+ last_seq_num_for_pic_id_[frame->Id()] = frame->last_seq_num();
+
+ last_completed_picture_id_ =
+ std::max(last_completed_picture_id_, frame->Id());
+ complete_frame_callback_->OnCompleteFrame(std::move(frame));
+ }
+}
+
+void RtpVideoStreamReceiver2::OnDecryptedFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+}
+
+void RtpVideoStreamReceiver2::OnDecryptionStatusChange(
+ FrameDecryptorInterface::Status status) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ // Called from BufferedFrameDecryptor::DecryptFrame.
+ frames_decryptable_ =
+ (status == FrameDecryptorInterface::Status::kOk) ||
+ (status == FrameDecryptorInterface::Status::kRecoverable);
+}
+
+void RtpVideoStreamReceiver2::SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
+ // TODO(bugs.webrtc.org/11993): Update callers or post the operation over to
+ // the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (buffered_frame_decryptor_ == nullptr) {
+ buffered_frame_decryptor_ =
+ std::make_unique<BufferedFrameDecryptor>(this, this, field_trials_);
+ }
+ buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
+}
+
+void RtpVideoStreamReceiver2::SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ frame_transformer_delegate_ =
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ this, std::move(frame_transformer), rtc::Thread::Current(),
+ config_.rtp.remote_ssrc);
+ frame_transformer_delegate_->Init();
+}
+
+void RtpVideoStreamReceiver2::SetRtpExtensions(
+ const std::vector<RtpExtension>& extensions) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_header_extensions_.Reset(extensions);
+}
+
+const RtpHeaderExtensionMap& RtpVideoStreamReceiver2::GetRtpExtensions() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return rtp_header_extensions_;
+}
+
+void RtpVideoStreamReceiver2::UpdateRtt(int64_t max_rtt_ms) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (nack_module_)
+ nack_module_->UpdateRtt(max_rtt_ms);
+}
+
+void RtpVideoStreamReceiver2::OnLocalSsrcChange(uint32_t local_ssrc) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_rtcp_->SetLocalSsrc(local_ssrc);
+}
+
+void RtpVideoStreamReceiver2::SetRtcpMode(RtcpMode mode) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_rtcp_->SetRTCPStatus(mode);
+}
+
+void RtpVideoStreamReceiver2::SetReferenceTimeReport(bool enabled) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_rtcp_->SetNonSenderRttMeasurement(enabled);
+}
+
+void RtpVideoStreamReceiver2::SetPacketSink(
+ RtpPacketSinkInterface* packet_sink) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ packet_sink_ = packet_sink;
+}
+
+void RtpVideoStreamReceiver2::SetLossNotificationEnabled(bool enabled) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (enabled && !loss_notification_controller_) {
+ loss_notification_controller_ =
+ std::make_unique<LossNotificationController>(&rtcp_feedback_buffer_,
+ &rtcp_feedback_buffer_);
+ } else if (!enabled && loss_notification_controller_) {
+ loss_notification_controller_.reset();
+ rtcp_feedback_buffer_.ClearLossNotificationState();
+ }
+}
+
+void RtpVideoStreamReceiver2::SetNackHistory(TimeDelta history) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (history.ms() == 0) {
+ nack_module_.reset();
+ } else if (!nack_module_) {
+ nack_module_ = std::make_unique<NackRequester>(
+ worker_queue_, nack_periodic_processor_, clock_, &rtcp_feedback_buffer_,
+ &rtcp_feedback_buffer_, field_trials_);
+ }
+
+ rtp_receive_statistics_->SetMaxReorderingThreshold(
+ config_.rtp.remote_ssrc,
+ history.ms() > 0 ? kMaxPacketAgeToNack : kDefaultMaxReorderingThreshold);
+}
+
+int RtpVideoStreamReceiver2::ulpfec_payload_type() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return ulpfec_receiver_ ? ulpfec_receiver_->ulpfec_payload_type() : -1;
+}
+
+int RtpVideoStreamReceiver2::red_payload_type() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return red_payload_type_;
+}
+
+void RtpVideoStreamReceiver2::SetProtectionPayloadTypes(
+ int red_payload_type,
+ int ulpfec_payload_type) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK(red_payload_type >= -1 && red_payload_type < 0x80);
+ RTC_DCHECK(ulpfec_payload_type >= -1 && ulpfec_payload_type < 0x80);
+ red_payload_type_ = red_payload_type;
+ ulpfec_receiver_ =
+ MaybeConstructUlpfecReceiver(config_.rtp.remote_ssrc, red_payload_type,
+ ulpfec_payload_type, this, clock_);
+}
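+
+// Illustrative call, using the payload type values exercised by the unit
+// tests (hypothetical values, not defaults). Passing -1 for either argument
+// clears the corresponding payload type:
+//
+//   receiver.SetProtectionPayloadTypes(/*red_payload_type=*/104,
+//                                      /*ulpfec_payload_type=*/107);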
+
+absl::optional<int64_t> RtpVideoStreamReceiver2::LastReceivedPacketMs() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (last_received_rtp_system_time_) {
+ return absl::optional<int64_t>(last_received_rtp_system_time_->ms());
+ }
+ return absl::nullopt;
+}
+
+absl::optional<int64_t> RtpVideoStreamReceiver2::LastReceivedKeyframePacketMs()
+ const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (last_received_keyframe_rtp_system_time_) {
+ return absl::optional<int64_t>(
+ last_received_keyframe_rtp_system_time_->ms());
+ }
+ return absl::nullopt;
+}
+
+// Mozilla modification: VideoReceiveStream2 and friends do not surface RTCP
+// stats at all, and even on the most recent libwebrtc code there does not
+// seem to be any support for these stats right now. So, we hack this in.
+void RtpVideoStreamReceiver2::RemoteRTCPSenderInfo(
+ uint32_t* packet_count, uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms, int64_t* remote_ntp_timestamp_ms) const {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ rtp_rtcp_->RemoteRTCPSenderInfo(packet_count, octet_count, ntp_timestamp_ms,
+ remote_ntp_timestamp_ms);
+}
+
+void RtpVideoStreamReceiver2::ManageFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame)));
+}
+
+void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ if (packet.payload_size() == 0) {
+ // Padding or keep-alive packet.
+ // TODO(nisse): Could drop empty packets earlier, but need to figure out how
+ // they should be counted in stats.
+ NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
+ return;
+ }
+ if (packet.PayloadType() == red_payload_type_) {
+ ParseAndHandleEncapsulatingHeader(packet);
+ return;
+ }
+
+ const auto type_it = payload_type_map_.find(packet.PayloadType());
+ if (type_it == payload_type_map_.end()) {
+ return;
+ }
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
+ type_it->second->Parse(packet.PayloadBuffer());
+ if (parsed_payload == absl::nullopt) {
+ RTC_LOG(LS_WARNING) << "Failed parsing payload.";
+ return;
+ }
+
+ OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
+ parsed_payload->video_header);
+}
+
+void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader(
+ const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK_EQ(packet.PayloadType(), red_payload_type_);
+
+ if (!ulpfec_receiver_ || packet.payload_size() == 0U)
+ return;
+
+ if (packet.payload()[0] == ulpfec_receiver_->ulpfec_payload_type()) {
+ // Notify video_receiver about received FEC packets to avoid NACKing these
+ // packets.
+ NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
+ }
+ if (ulpfec_receiver_->AddReceivedRedPacket(packet)) {
+ ulpfec_receiver_->ProcessReceivedFec();
+ }
+}
+
+// For a video stream without picture ids and without RTX, the
+// RtpFrameReferenceFinder needs to know about padding in order to
+// correctly calculate frame references.
+void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ OnCompleteFrames(reference_finder_->PaddingReceived(seq_num));
+
+ OnInsertedPacket(packet_buffer_.InsertPadding(seq_num));
+ if (nack_module_) {
+ nack_module_->OnReceivedPacket(seq_num, /*is_keyframe=*/false,
+                                /*is_recovered=*/false);
+ }
+ if (loss_notification_controller_) {
+ // TODO(bugs.webrtc.org/10336): Handle empty packets.
+ RTC_LOG(LS_WARNING)
+ << "LossNotificationController does not expect empty packets.";
+ }
+}
+
+bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet,
+ size_t rtcp_packet_length) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ if (!receiving_) {
+ return false;
+ }
+
+ rtp_rtcp_->IncomingRtcpPacket(rtcp_packet, rtcp_packet_length);
+
+ int64_t rtt = 0;
+ rtp_rtcp_->RTT(config_.rtp.remote_ssrc, &rtt, nullptr, nullptr, nullptr);
+ if (rtt == 0) {
+ // Waiting for a valid RTT.
+ return true;
+ }
+ uint32_t ntp_secs = 0;
+ uint32_t ntp_frac = 0;
+ uint32_t rtp_timestamp = 0;
+ uint32_t received_ntp_secs = 0;
+ uint32_t received_ntp_frac = 0;
+ if (rtp_rtcp_->RemoteNTP(&ntp_secs, &ntp_frac, &received_ntp_secs,
+ &received_ntp_frac, &rtp_timestamp) != 0) {
+ // Waiting for RTCP.
+ return true;
+ }
+ NtpTime received_ntp(received_ntp_secs, received_ntp_frac);
+ int64_t time_since_received =
+ clock_->CurrentNtpInMilliseconds() - received_ntp.ToMs();
+ // Don't use old SRs to estimate time.
+ if (time_since_received <= 1) {
+ ntp_estimator_.UpdateRtcpTimestamp(
+ TimeDelta::Millis(rtt), NtpTime(ntp_secs, ntp_frac), rtp_timestamp);
+ absl::optional<int64_t> remote_to_local_clock_offset =
+ ntp_estimator_.EstimateRemoteToLocalClockOffset();
+ if (remote_to_local_clock_offset.has_value()) {
+ capture_clock_offset_updater_.SetRemoteToLocalClockOffset(
+ *remote_to_local_clock_offset);
+ }
+ }
+
+ return true;
+}
+
+void RtpVideoStreamReceiver2::FrameContinuous(int64_t picture_id) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (!nack_module_)
+ return;
+
+ int seq_num = -1;
+ auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+ if (seq_num_it != last_seq_num_for_pic_id_.end())
+ seq_num = seq_num_it->second;
+ if (seq_num != -1)
+ nack_module_->ClearUpTo(seq_num);
+}
+
+void RtpVideoStreamReceiver2::FrameDecoded(int64_t picture_id) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ int seq_num = -1;
+ auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+ if (seq_num_it != last_seq_num_for_pic_id_.end()) {
+ seq_num = seq_num_it->second;
+ last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(),
+ ++seq_num_it);
+ }
+
+ if (seq_num != -1) {
+ int64_t unwrapped_rtp_seq_num = rtp_seq_num_unwrapper_.Unwrap(seq_num);
+ packet_infos_.erase(packet_infos_.begin(),
+ packet_infos_.upper_bound(unwrapped_rtp_seq_num));
+ uint32_t num_packets_cleared = packet_buffer_.ClearTo(seq_num);
+ if (num_packets_cleared > 0) {
+ TRACE_EVENT2("webrtc",
+ "RtpVideoStreamReceiver2::FrameDecoded Cleared Old Packets",
+ "remote_ssrc", config_.rtp.remote_ssrc, "seq_num", seq_num);
+ vcm_receive_statistics_->OnDiscardedPackets(num_packets_cleared);
+ }
+ reference_finder_->ClearTo(seq_num);
+ }
+}
+
+void RtpVideoStreamReceiver2::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? config_.rtp.rtcp_mode
+ : RtcpMode::kOff);
+}
+
+void RtpVideoStreamReceiver2::StartReceive() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ receiving_ = true;
+}
+
+void RtpVideoStreamReceiver2::StopReceive() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ receiving_ = false;
+}
+
+void RtpVideoStreamReceiver2::InsertSpsPpsIntoTracker(uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ auto codec_params_it = pt_codec_params_.find(payload_type);
+ if (codec_params_it == pt_codec_params_.end())
+ return;
+
+ RTC_LOG(LS_INFO) << "Found out of band supplied codec parameters for"
+ " payload type: "
+ << static_cast<int>(payload_type);
+
+ H264SpropParameterSets sprop_decoder;
+ auto sprop_base64_it =
+ codec_params_it->second.find(cricket::kH264FmtpSpropParameterSets);
+
+ if (sprop_base64_it == codec_params_it->second.end())
+ return;
+
+ if (!sprop_decoder.DecodeSprop(sprop_base64_it->second.c_str()))
+ return;
+
+ tracker_.InsertSpsPpsNalus(sprop_decoder.sps_nalu(),
+ sprop_decoder.pps_nalu());
+}
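+
+// The out-of-band codec parameters consumed above typically originate from an
+// SDP fmtp line. A minimal example, using the RFC 3984 section 8.2 parameter
+// sets that also appear in the unit tests (the payload type is hypothetical):
+//
+//   a=fmtp:99 sprop-parameter-sets=Z0IACpZTBYmI,aMljiA==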
+
+void RtpVideoStreamReceiver2::UpdatePacketReceiveTimestamps(
+ const RtpPacketReceived& packet,
+ bool is_keyframe) {
+ Timestamp now = clock_->CurrentTime();
+ if (is_keyframe ||
+ last_received_keyframe_rtp_timestamp_ == packet.Timestamp()) {
+ last_received_keyframe_rtp_timestamp_ = packet.Timestamp();
+ last_received_keyframe_rtp_system_time_ = now;
+ }
+ last_received_rtp_system_time_ = now;
+ last_received_rtp_timestamp_ = packet.Timestamp();
+
+ // Periodically log the RTP header of incoming packets.
+ if (now.ms() - last_packet_log_ms_ > kPacketLogIntervalMs) {
+ rtc::StringBuilder ss;
+ ss << "Packet received on SSRC: " << packet.Ssrc()
+ << " with payload type: " << static_cast<int>(packet.PayloadType())
+ << ", timestamp: " << packet.Timestamp()
+ << ", sequence number: " << packet.SequenceNumber()
+ << ", arrival time: " << ToString(packet.arrival_time());
+ int32_t time_offset;
+ if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
+ ss << ", toffset: " << time_offset;
+ }
+ uint32_t send_time;
+ if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
+ ss << ", abs send time: " << send_time;
+ }
+ RTC_LOG(LS_INFO) << ss.str();
+ last_packet_log_ms_ = now.ms();
+ }
+}
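+
+// The periodic log above yields a line of roughly this shape (all values are
+// illustrative, and the extension fields appear only when present):
+//
+//   Packet received on SSRC: 1111 with payload type: 100, timestamp: 9000,
+//   sequence number: 42, arrival time: ..., toffset: 0, abs send time: 123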
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/rtp_video_stream_receiver2.h b/third_party/libwebrtc/video/rtp_video_stream_receiver2.h
new file mode 100644
index 0000000000..931525a054
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_video_stream_receiver2.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
+#define VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/sequence_checker.h"
+#include "api/units/timestamp.h"
+#include "api/video/color_space.h"
+#include "api/video/video_codec_type.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "call/syncable.h"
+#include "call/video_receive_stream.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/recovered_packet_receiver.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h"
+#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+#include "modules/video_coding/loss_notification_controller.h"
+#include "modules/video_coding/nack_requester.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/buffered_frame_decryptor.h"
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
+#include "video/unique_timestamp_counter.h"
+
+namespace webrtc {
+
+class NackRequester;
+class PacketRouter;
+class ReceiveStatistics;
+class RtcpRttStats;
+class RtpPacketReceived;
+class Transport;
+class UlpfecReceiver;
+
+class RtpVideoStreamReceiver2 : public LossNotificationSender,
+ public RecoveredPacketReceiver,
+ public RtpPacketSinkInterface,
+ public KeyFrameRequestSender,
+ public NackSender,
+ public OnDecryptedFrameCallback,
+ public OnDecryptionStatusChangeCallback,
+ public RtpVideoFrameReceiver {
+ public:
+ // A complete frame is a frame for which all packets have been received and
+ // all references are known.
+ class OnCompleteFrameCallback {
+ public:
+ virtual ~OnCompleteFrameCallback() {}
+ virtual void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) = 0;
+ };
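+
+ // A minimal sketch of an OnCompleteFrameCallback implementation
+ // (hypothetical, for illustration only):
+ //
+ //   class ForwardingFrameCallback
+ //       : public RtpVideoStreamReceiver2::OnCompleteFrameCallback {
+ //    public:
+ //     void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {
+ //       // Hand the assembled frame to e.g. a frame buffer or decoder.
+ //       RTC_LOG(LS_VERBOSE) << "Complete frame " << frame->Id();
+ //     }
+ //   };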
+
+ RtpVideoStreamReceiver2(
+ TaskQueueBase* current_queue,
+ Clock* clock,
+ Transport* transport,
+ RtcpRttStats* rtt_stats,
+ // The packet router is optional; if provided, the RtpRtcp module for this
+ // stream is registered as a candidate for sending REMB and transport
+ // feedback.
+ PacketRouter* packet_router,
+ const VideoReceiveStreamInterface::Config* config,
+ ReceiveStatistics* rtp_receive_statistics,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ RtcpCnameCallback* rtcp_cname_callback,
+ NackPeriodicProcessor* nack_periodic_processor,
+ VCMReceiveStatisticsCallback* vcm_receive_statistics,
+ // The KeyFrameRequestSender is optional; if not provided, key frame
+ // requests are sent via the internal RtpRtcp module.
+ OnCompleteFrameCallback* complete_frame_callback,
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ const FieldTrialsView& field_trials,
+ RtcEventLog* event_log);
+ ~RtpVideoStreamReceiver2() override;
+
+ void AddReceiveCodec(uint8_t payload_type,
+ VideoCodecType video_codec,
+ const std::map<std::string, std::string>& codec_params,
+ bool raw_payload);
+ void RemoveReceiveCodec(uint8_t payload_type);
+
+ // Clears state for all receive codecs added via `AddReceiveCodec`.
+ void RemoveReceiveCodecs();
+
+ void StartReceive();
+ void StopReceive();
+
+ // Produces the transport-related timestamps; current_delay_ms is left unset.
+ absl::optional<Syncable::Info> GetSyncInfo() const;
+
+ bool DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length);
+
+ void FrameContinuous(int64_t picture_id);
+
+ void FrameDecoded(int64_t picture_id);
+
+ void SignalNetworkState(NetworkState state);
+
+ // Returns number of different frames seen.
+ int GetUniqueFramesSeen() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return frame_counter_.GetUniqueSeen();
+ }
+
+ // Implements RtpPacketSinkInterface.
+ void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+ // Public only for tests.
+ void OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload,
+ const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video);
+
+ // Implements RecoveredPacketReceiver.
+ void OnRecoveredPacket(const RtpPacketReceived& packet) override;
+
+ // Send an RTCP keyframe request.
+ void RequestKeyFrame() override;
+
+ // Implements NackSender.
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override;
+
+ // Implements LossNotificationSender.
+ void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override;
+
+ // Returns true if a decryptor is attached and frames can be decrypted.
+ // Updated by OnDecryptionStatusChangeCallback. Note that this refers to
+ // frame decryption, not SRTP.
+ bool IsDecryptable() const;
+
+ // Implements OnDecryptedFrameCallback.
+ void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) override;
+
+ // Implements OnDecryptionStatusChangeCallback.
+ void OnDecryptionStatusChange(
+ FrameDecryptorInterface::Status status) override;
+
+ // Optionally set a frame decryptor after a stream has started. This will not
+ // reset the decoder state.
+ void SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);
+
+ // Sets a frame transformer after a stream has started, if no transformer
+ // has previously been set. Does not reset the decoder state.
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
+
+ // Updates the rtp header extensions at runtime. Must be called on the
+ // `packet_sequence_checker_` thread.
+ void SetRtpExtensions(const std::vector<RtpExtension>& extensions);
+ const RtpHeaderExtensionMap& GetRtpExtensions() const;
+
+ // Called by VideoReceiveStreamInterface when stats are updated.
+ void UpdateRtt(int64_t max_rtt_ms);
+
+ // Called when the local_ssrc is changed to match a sender.
+ void OnLocalSsrcChange(uint32_t local_ssrc);
+
+ // Forwards the call to the internal RtpRtcp module to set the RTCP mode of
+ // the RTCP sender.
+ void SetRtcpMode(RtcpMode mode);
+
+ void SetReferenceTimeReport(bool enabled);
+
+ // Sets or clears the callback sink that gets called for RTP packets. Used for
+ // packet handlers such as FlexFec. Must be called on the packet delivery
+ // thread (same context as `OnRtpPacket` is called on).
+ // TODO(bugs.webrtc.org/11993): Packet delivery thread today means `worker
+ // thread` but will be `network thread`.
+ void SetPacketSink(RtpPacketSinkInterface* packet_sink);
+
+ // Turns on/off loss notifications. Must be called on the packet delivery
+ // thread.
+ void SetLossNotificationEnabled(bool enabled);
+
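+ // Setting a zero `history` disables NACK (the NackRequester is released).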
+ void SetNackHistory(TimeDelta history);
+
+ int ulpfec_payload_type() const;
+ int red_payload_type() const;
+ void SetProtectionPayloadTypes(int red_payload_type, int ulpfec_payload_type);
+
+ absl::optional<int64_t> LastReceivedPacketMs() const;
+ absl::optional<int64_t> LastReceivedKeyframePacketMs() const;
+
+ // Mozilla modification: VideoReceiveStream2 and friends do not surface RTCP
+ // stats at all, and even on the most recent libwebrtc code there does not
+ // seem to be any support for these stats right now. So, we hack this in.
+ void RemoteRTCPSenderInfo(uint32_t* packet_count, uint32_t* octet_count,
+ int64_t* ntp_timestamp_ms,
+ int64_t* remote_ntp_timestamp_ms) const;
+
+ private:
+ // Implements RtpVideoFrameReceiver.
+ void ManageFrame(std::unique_ptr<RtpFrameObject> frame) override;
+
+ void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames)
+ RTC_RUN_ON(packet_sequence_checker_);
+
+ // Used for buffering RTCP feedback messages and sending them all together.
+ // Note:
+ // 1. Key frame requests and NACKs are mutually exclusive, with the
+ // former taking precedence over the latter.
+ // 2. Loss notifications are orthogonal to either. (That is, may be sent
+ // alongside either.)
+ class RtcpFeedbackBuffer : public KeyFrameRequestSender,
+ public NackSender,
+ public LossNotificationSender {
+ public:
+ RtcpFeedbackBuffer(KeyFrameRequestSender* key_frame_request_sender,
+ NackSender* nack_sender,
+ LossNotificationSender* loss_notification_sender);
+
+ ~RtcpFeedbackBuffer() override = default;
+
+ // KeyFrameRequestSender implementation.
+ void RequestKeyFrame() override;
+
+ // NackSender implementation.
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override;
+
+ // LossNotificationSender implementation.
+ void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override;
+
+ // Send all RTCP feedback messages buffered thus far.
+ void SendBufferedRtcpFeedback();
+
+ void ClearLossNotificationState();
+
+ private:
+ // LNTF-related state.
+ struct LossNotificationState {
+ LossNotificationState(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag)
+ : last_decoded_seq_num(last_decoded_seq_num),
+ last_received_seq_num(last_received_seq_num),
+ decodability_flag(decodability_flag) {}
+
+ uint16_t last_decoded_seq_num;
+ uint16_t last_received_seq_num;
+ bool decodability_flag;
+ };
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
+ KeyFrameRequestSender* const key_frame_request_sender_;
+ NackSender* const nack_sender_;
+ LossNotificationSender* const loss_notification_sender_;
+
+ // Key-frame-request-related state.
+ bool request_key_frame_ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ // NACK-related state.
+ std::vector<uint16_t> nack_sequence_numbers_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ absl::optional<LossNotificationState> lntf_state_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ };
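+
+ // Illustration of the buffering rules above (a hypothetical sequence):
+ //
+ //   buffer.SendNack({7, 8}, /*buffering_allowed=*/true);
+ //   buffer.RequestKeyFrame();
+ //   buffer.SendBufferedRtcpFeedback();
+ //
+ // The flush sends only the key frame request, since key frame requests
+ // take precedence over NACKs; a buffered loss notification would be sent
+ // in either case.
+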
+ enum ParseGenericDependenciesResult {
+ kDropPacket,
+ kHasGenericDescriptor,
+ kNoGenericDescriptor
+ };
+
+ // Entry point doing non-stats work for a received packet. Called
+ // for the same packet both before and after RED decapsulation.
+ void ReceivePacket(const RtpPacketReceived& packet)
+ RTC_RUN_ON(packet_sequence_checker_);
+
+ // Parses and handles RED headers.
+ // This function assumes that it's being called from only one thread.
+ void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet)
+ RTC_RUN_ON(packet_sequence_checker_);
+ void NotifyReceiverOfEmptyPacket(uint16_t seq_num)
+ RTC_RUN_ON(packet_sequence_checker_);
+ bool IsRedEnabled() const;
+ void InsertSpsPpsIntoTracker(uint8_t payload_type)
+ RTC_RUN_ON(packet_sequence_checker_);
+ void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result)
+ RTC_RUN_ON(packet_sequence_checker_);
+ ParseGenericDependenciesResult ParseGenericDependenciesExtension(
+ const RtpPacketReceived& rtp_packet,
+ RTPVideoHeader* video_header) RTC_RUN_ON(packet_sequence_checker_);
+ void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame)
+ RTC_RUN_ON(packet_sequence_checker_);
+ void UpdatePacketReceiveTimestamps(const RtpPacketReceived& packet,
+ bool is_keyframe)
+ RTC_RUN_ON(packet_sequence_checker_);
+
+ const FieldTrialsView& field_trials_;
+ TaskQueueBase* const worker_queue_;
+ Clock* const clock_;
+ // Ownership of this object lies with VideoReceiveStreamInterface, which owns
+ // `this`.
+ const VideoReceiveStreamInterface::Config& config_;
+ PacketRouter* const packet_router_;
+
+ RemoteNtpTimeEstimator ntp_estimator_;
+
+ RtpHeaderExtensionMap rtp_header_extensions_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ // Set by the field trial WebRTC-ForcePlayoutDelay to override any playout
+ // delay that is specified in the received packets.
+ FieldTrialOptional<int> forced_playout_delay_max_ms_;
+ FieldTrialOptional<int> forced_playout_delay_min_ms_;
+ ReceiveStatistics* const rtp_receive_statistics_;
+ std::unique_ptr<UlpfecReceiver> ulpfec_receiver_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ int red_payload_type_ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_task_checker_;
+ // TODO(bugs.webrtc.org/11993): This checker conceptually represents
+ // operations that belong to the network thread. The Call class is currently
+ // moving towards handling network packets on the network thread and while
+ // that work is ongoing, this checker may in practice represent the worker
+ // thread, but still serves as a mechanism of grouping together concepts
+ // that belong to the network thread. Once the packets are fully delivered
+ // on the network thread, this comment will be deleted.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
+ RtpPacketSinkInterface* packet_sink_ RTC_GUARDED_BY(packet_sequence_checker_);
+ bool receiving_ RTC_GUARDED_BY(packet_sequence_checker_);
+ int64_t last_packet_log_ms_ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ const std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+
+ NackPeriodicProcessor* const nack_periodic_processor_;
+ OnCompleteFrameCallback* complete_frame_callback_;
+ const KeyFrameReqMethod keyframe_request_method_;
+
+ RtcpFeedbackBuffer rtcp_feedback_buffer_;
+ // TODO(tommi): Consider absl::optional<NackRequester> instead of unique_ptr
+ // since nack is usually configured.
+ std::unique_ptr<NackRequester> nack_module_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ std::unique_ptr<LossNotificationController> loss_notification_controller_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ VCMReceiveStatisticsCallback* const vcm_receive_statistics_;
+ video_coding::PacketBuffer packet_buffer_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ UniqueTimestampCounter frame_counter_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ SeqNumUnwrapper<uint16_t> frame_id_unwrapper_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ // Video structure provided in the dependency descriptor in the first packet
+ // of a key frame. It is required in order to parse the dependency descriptor
+ // of the following delta packets.
+ std::unique_ptr<FrameDependencyStructure> video_structure_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ // Frame id of the last frame with the attached video structure.
+ // absl::nullopt when `video_structure_ == nullptr`.
+ absl::optional<int64_t> video_structure_frame_id_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ Timestamp last_logged_failed_to_parse_dd_
+ RTC_GUARDED_BY(packet_sequence_checker_) = Timestamp::MinusInfinity();
+
+ std::unique_ptr<RtpFrameReferenceFinder> reference_finder_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ absl::optional<VideoCodecType> current_codec_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ uint32_t last_assembled_frame_rtp_timestamp_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ std::map<int64_t, uint16_t> last_seq_num_for_pic_id_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ video_coding::H264SpsPpsTracker tracker_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ // Maps payload type to the depacketizer.
+ std::map<uint8_t, std::unique_ptr<VideoRtpDepacketizer>> payload_type_map_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ // TODO(johan): Remove pt_codec_params_ once
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved.
+ // Maps a payload type to a map of out-of-band supplied codec parameters.
+ std::map<uint8_t, std::map<std::string, std::string>> pt_codec_params_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ int16_t last_payload_type_ RTC_GUARDED_BY(packet_sequence_checker_) = -1;
+
+ bool has_received_frame_ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ absl::optional<uint32_t> last_received_rtp_timestamp_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ absl::optional<uint32_t> last_received_keyframe_rtp_timestamp_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ absl::optional<Timestamp> last_received_rtp_system_time_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ absl::optional<Timestamp> last_received_keyframe_rtp_system_time_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ // Handles incoming encrypted frames and forwards them to the
+ // `reference_finder_` if they are decryptable.
+ std::unique_ptr<BufferedFrameDecryptor> buffered_frame_decryptor_
+ RTC_PT_GUARDED_BY(packet_sequence_checker_);
+ bool frames_decryptable_ RTC_GUARDED_BY(worker_task_checker_);
+ absl::optional<ColorSpace> last_color_space_;
+
+ AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ CaptureClockOffsetUpdater capture_clock_offset_updater_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ int64_t last_completed_picture_id_ = 0;
+
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>
+ frame_transformer_delegate_;
+
+ SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ std::map<int64_t, RtpPacketInfo> packet_infos_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ Timestamp next_keyframe_request_for_missing_video_structure_ =
+ Timestamp::MinusInfinity();
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
diff --git a/third_party/libwebrtc/video/rtp_video_stream_receiver2_unittest.cc b/third_party/libwebrtc/video/rtp_video_stream_receiver2_unittest.cc
new file mode 100644
index 0000000000..2ffe4788a1
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_video_stream_receiver2_unittest.cc
@@ -0,0 +1,1233 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver2.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_codec_type.h"
+#include "api/video/video_frame_type.h"
+#include "call/test/mock_rtp_packet_sink_interface.h"
+#include "common_video/h264/h264_common.h"
+#include "media/base/media_constants.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/byte_buffer.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_frame_transformer.h"
+#include "test/mock_transport.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_task_queue.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Invoke;
+using ::testing::SizeIs;
+using ::testing::Values;
+
+namespace webrtc {
+
+namespace {
+
+const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
+
+std::vector<uint64_t> GetAbsoluteCaptureTimestamps(const EncodedFrame* frame) {
+ std::vector<uint64_t> result;
+ for (const auto& packet_info : frame->PacketInfos()) {
+ if (packet_info.absolute_capture_time()) {
+ result.push_back(
+ packet_info.absolute_capture_time()->absolute_capture_timestamp);
+ }
+ }
+ return result;
+}
+
+RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
+ RTPVideoHeader video_header;
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = frame_type;
+ return video_header;
+}
+
+class MockNackSender : public NackSender {
+ public:
+ MOCK_METHOD(void,
+ SendNack,
+ (const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed),
+ (override));
+};
+
+class MockKeyFrameRequestSender : public KeyFrameRequestSender {
+ public:
+ MOCK_METHOD(void, RequestKeyFrame, (), (override));
+};
+
+class MockOnCompleteFrameCallback
+ : public RtpVideoStreamReceiver2::OnCompleteFrameCallback {
+ public:
+ MOCK_METHOD(void, DoOnCompleteFrame, (EncodedFrame*), ());
+ MOCK_METHOD(void, DoOnCompleteFrameFailNullptr, (EncodedFrame*), ());
+ MOCK_METHOD(void, DoOnCompleteFrameFailLength, (EncodedFrame*), ());
+ MOCK_METHOD(void, DoOnCompleteFrameFailBitstream, (EncodedFrame*), ());
+ void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {
+ if (!frame) {
+ DoOnCompleteFrameFailNullptr(nullptr);
+ return;
+ }
+ EXPECT_EQ(buffer_.Length(), frame->size());
+ if (buffer_.Length() != frame->size()) {
+ DoOnCompleteFrameFailLength(frame.get());
+ return;
+ }
+ if (frame->size() != buffer_.Length() ||
+ memcmp(buffer_.Data(), frame->data(), buffer_.Length()) != 0) {
+ DoOnCompleteFrameFailBitstream(frame.get());
+ return;
+ }
+ DoOnCompleteFrame(frame.get());
+ }
+
+ void ClearExpectedBitstream() { buffer_.Clear(); }
+
+ void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
+ // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
+ buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
+ }
+ rtc::ByteBufferWriter buffer_;
+};
+
+constexpr uint32_t kSsrc = 111;
+constexpr int kPayloadType = 100;
+constexpr int kRedPayloadType = 125;
+
+std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
+ constexpr uint16_t kSequenceNumber = 222;
+ auto packet = std::make_unique<RtpPacketReceived>();
+ packet->SetSsrc(kSsrc);
+ packet->SetSequenceNumber(kSequenceNumber);
+ packet->SetPayloadType(kPayloadType);
+ return packet;
+}
+
+MATCHER_P(SamePacketAs, other, "") {
+ return arg.Ssrc() == other.Ssrc() &&
+ arg.SequenceNumber() == other.SequenceNumber();
+}
+
+} // namespace
+
+class RtpVideoStreamReceiver2Test : public ::testing::Test,
+ public RtpPacketSinkInterface {
+ public:
+ RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {}
+ explicit RtpVideoStreamReceiver2Test(std::string field_trials)
+ : time_controller_(Timestamp::Millis(100)),
+ task_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "RtpVideoStreamReceiver2Test",
+ TaskQueueFactory::Priority::NORMAL)),
+ task_queue_setter_(task_queue_.get()),
+ field_trials_(field_trials),
+ config_(CreateConfig()) {
+ rtp_receive_statistics_ =
+ ReceiveStatistics::Create(Clock::GetRealTimeClock());
+ rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver2>(
+ TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
+ nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
+ nullptr, &nack_periodic_processor_, &mock_on_complete_frame_callback_,
+ nullptr, nullptr, field_trials_, nullptr);
+ rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType,
+ kVideoCodecGeneric, {},
+ /*raw_payload=*/false);
+ ON_CALL(mock_transport_, SendRtcp)
+ .WillByDefault(
+ Invoke(&rtcp_packet_parser_, &test::RtcpPacketParser::Parse));
+ }
+
+ RTPVideoHeader GetDefaultH264VideoHeader() {
+ RTPVideoHeader video_header;
+ video_header.codec = kVideoCodecH264;
+ video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ return video_header;
+ }
+
+ // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
+ // code.
+ void AddSps(RTPVideoHeader* video_header,
+ uint8_t sps_id,
+ rtc::CopyOnWriteBuffer* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kSps;
+ info.sps_id = sps_id;
+ info.pps_id = -1;
+ data->AppendData<uint8_t, 2>({H264::NaluType::kSps, sps_id});
+ auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+ h264.nalus[h264.nalus_length++] = info;
+ }
+
+ void AddPps(RTPVideoHeader* video_header,
+ uint8_t sps_id,
+ uint8_t pps_id,
+ rtc::CopyOnWriteBuffer* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kPps;
+ info.sps_id = sps_id;
+ info.pps_id = pps_id;
+ data->AppendData<uint8_t, 2>({H264::NaluType::kPps, pps_id});
+ auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+ h264.nalus[h264.nalus_length++] = info;
+ }
+
+ void AddIdr(RTPVideoHeader* video_header, int pps_id) {
+ NaluInfo info;
+ info.type = H264::NaluType::kIdr;
+ info.sps_id = -1;
+ info.pps_id = pps_id;
+ auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+ h264.nalus[h264.nalus_length++] = info;
+ }
+
+ void OnRtpPacket(const RtpPacketReceived& packet) override {
+ if (test_packet_sink_)
+ test_packet_sink_->OnRtpPacket(packet);
+ }
+
+ protected:
+ VideoReceiveStreamInterface::Config CreateConfig() {
+ VideoReceiveStreamInterface::Config config(nullptr);
+ config.rtp.remote_ssrc = 1111;
+ config.rtp.local_ssrc = 2222;
+ config.rtp.red_payload_type = kRedPayloadType;
+ config.rtp.packet_sink_ = this;
+ return config;
+ }
+
+ GlobalSimulatedTimeController time_controller_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_;
+ TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_;
+
+ webrtc::test::ScopedKeyValueConfig field_trials_;
+ VideoReceiveStreamInterface::Config config_;
+ NackPeriodicProcessor nack_periodic_processor_;
+ test::RtcpPacketParser rtcp_packet_parser_;
+ MockTransport mock_transport_;
+ MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
+ std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+ std::unique_ptr<RtpVideoStreamReceiver2> rtp_video_stream_receiver_;
+ RtpPacketSinkInterface* test_packet_sink_ = nullptr;
+};
+
+TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) {
+ // Test that color space is cached from the last packet of a key frame and
+ // that it's not reset by padding packets without color space.
+ constexpr int kVp9PayloadType = 99;
+ const ColorSpace kColorSpace(
+ ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
+ ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
+ const std::vector<uint8_t> kKeyFramePayload = {0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10};
+ const std::vector<uint8_t> kDeltaFramePayload = {0, 1, 2, 3, 4};
+
+ // Anonymous helper class that generates received packets.
+ class {
+ public:
+ void SetPayload(const std::vector<uint8_t>& payload,
+ VideoFrameType video_frame_type) {
+ video_frame_type_ = video_frame_type;
+ RtpPacketizer::PayloadSizeLimits pay_load_size_limits;
+ // Reduce max payload length to make sure the key frame generates two
+ // packets.
+ pay_load_size_limits.max_payload_len = 8;
+ RTPVideoHeaderVP9 rtp_video_header_vp9;
+ rtp_video_header_vp9.InitRTPVideoHeaderVP9();
+ rtp_video_header_vp9.inter_pic_predicted =
+ (video_frame_type == VideoFrameType::kVideoFrameDelta);
+ rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
+ payload, pay_load_size_limits, rtp_video_header_vp9);
+ }
+
+ size_t NumPackets() { return rtp_packetizer_->NumPackets(); }
+ void SetColorSpace(const ColorSpace& color_space) {
+ color_space_ = color_space;
+ }
+
+ RtpPacketReceived NextPacket() {
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<ColorSpaceExtension>(1);
+ RtpPacketToSend packet_to_send(&extension_map);
+ packet_to_send.SetSequenceNumber(sequence_number_++);
+ packet_to_send.SetSsrc(kSsrc);
+ packet_to_send.SetPayloadType(kVp9PayloadType);
+ bool include_color_space =
+ (rtp_packetizer_->NumPackets() == 1u &&
+ video_frame_type_ == VideoFrameType::kVideoFrameKey);
+ if (include_color_space) {
+ EXPECT_TRUE(
+ packet_to_send.SetExtension<ColorSpaceExtension>(color_space_));
+ }
+ rtp_packetizer_->NextPacket(&packet_to_send);
+
+ RtpPacketReceived received_packet(&extension_map);
+ received_packet.Parse(packet_to_send.data(), packet_to_send.size());
+ return received_packet;
+ }
+
+ private:
+ uint16_t sequence_number_ = 0;
+ VideoFrameType video_frame_type_;
+ ColorSpace color_space_;
+ std::unique_ptr<RtpPacketizer> rtp_packetizer_;
+ } received_packet_generator;
+ received_packet_generator.SetColorSpace(kColorSpace);
+
+ // Prepare the receiver for VP9.
+ std::map<std::string, std::string> codec_params;
+ rtp_video_stream_receiver_->AddReceiveCodec(kVp9PayloadType, kVideoCodecVP9,
+ codec_params,
+ /*raw_payload=*/false);
+
+ // Generate key frame packets.
+ received_packet_generator.SetPayload(kKeyFramePayload,
+ VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(received_packet_generator.NumPackets(), 2u);
+ RtpPacketReceived key_frame_packet1 = received_packet_generator.NextPacket();
+ RtpPacketReceived key_frame_packet2 = received_packet_generator.NextPacket();
+
+ // Generate delta frame packet.
+ received_packet_generator.SetPayload(kDeltaFramePayload,
+ VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(received_packet_generator.NumPackets(), 1u);
+ RtpPacketReceived delta_frame_packet = received_packet_generator.NextPacket();
+
+ rtp_video_stream_receiver_->StartReceive();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kKeyFramePayload.data(), kKeyFramePayload.size());
+
+ // Send the key frame and expect a callback with color space information.
+ EXPECT_FALSE(key_frame_packet1.GetExtension<ColorSpaceExtension>());
+ EXPECT_TRUE(key_frame_packet2.GetExtension<ColorSpaceExtension>());
+ rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) {
+ ASSERT_TRUE(frame->EncodedImage().ColorSpace());
+ EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
+ }));
+ rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet2);
+ // Resend the first key frame packet to simulate, for example, padding.
+ rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
+
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kDeltaFramePayload.data(), kDeltaFramePayload.size());
+
+ // Expect the delta frame to have the color space set even though the color
+ // space is not included in the RTP packet.
+ EXPECT_FALSE(delta_frame_packet.GetExtension<ColorSpaceExtension>());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) {
+ ASSERT_TRUE(frame->EncodedImage().ColorSpace());
+ EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
+ }));
+ rtp_video_stream_receiver_->OnRtpPacket(delta_frame_packet);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrame) {
+ RtpPacketReceived rtp_packet;
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, SetProtectionPayloadTypes) {
+ EXPECT_NE(rtp_video_stream_receiver_->red_payload_type(), 104);
+ EXPECT_NE(rtp_video_stream_receiver_->ulpfec_payload_type(), 107);
+
+ rtp_video_stream_receiver_->SetProtectionPayloadTypes(104, 107);
+
+ EXPECT_EQ(rtp_video_stream_receiver_->red_payload_type(), 104);
+ EXPECT_EQ(rtp_video_stream_receiver_->ulpfec_payload_type(), 107);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) {
+ constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
+ constexpr int kId0 = 1;
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
+ RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ rtp_packet.SetSequenceNumber(1);
+ rtp_packet.SetTimestamp(1);
+ rtp_packet.SetSsrc(kSsrc);
+ rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
+ AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
+ /*estimated_capture_clock_offset=*/absl::nullopt});
+
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([kAbsoluteCaptureTimestamp](EncodedFrame* frame) {
+ EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame),
+ ElementsAre(kAbsoluteCaptureTimestamp));
+ }));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test,
+ MissingAbsoluteCaptureTimeIsFilledWithExtrapolatedValue) {
+ constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
+ constexpr int kId0 = 1;
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
+ RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ uint16_t sequence_number = 1;
+ uint32_t rtp_timestamp = 1;
+ rtp_packet.SetSequenceNumber(sequence_number);
+ rtp_packet.SetTimestamp(rtp_timestamp);
+ rtp_packet.SetSsrc(kSsrc);
+ rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
+ AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
+ /*estimated_capture_clock_offset=*/absl::nullopt});
+
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ // Rtp packet without absolute capture time.
+ rtp_packet = RtpPacketReceived(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(++sequence_number);
+ rtp_packet.SetTimestamp(++rtp_timestamp);
+ rtp_packet.SetSsrc(kSsrc);
+
+ // There is no absolute capture time in the second packet.
+ // Expect the rtp video stream receiver to extrapolate it for the resulting
+ // video frame using the absolute capture time from the previous packet.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([](EncodedFrame* frame) {
+ EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
+ }));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test,
+ NoInfiniteRecursionOnEncapsulatedRedPacket) {
+ const std::vector<uint8_t> data({
+ 0x80, // RTP version.
+ kRedPayloadType, // Payload type.
+ 0, 0, 0, 0, 0, 0, // Don't care.
+ 0, 0, 0x4, 0x57, // SSRC
+ kRedPayloadType, // RED header.
+ 0, 0, 0, 0, 0 // Don't care.
+ });
+ RtpPacketReceived packet;
+ EXPECT_TRUE(packet.Parse(data.data(), data.size()));
+ rtp_video_stream_receiver_->StartReceive();
+ rtp_video_stream_receiver_->OnRtpPacket(packet);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test,
+ DropsPacketWithRedPayloadTypeAndEmptyPayload) {
+ const uint8_t kRedPayloadType = 125;
+ config_.rtp.red_payload_type = kRedPayloadType;
+ SetUp(); // re-create rtp_video_stream_receiver with red payload type.
+ // clang-format off
+ const uint8_t data[] = {
+ 0x80, // RTP version.
+ kRedPayloadType, // Payload type.
+ 0, 0, 0, 0, 0, 0, // Don't care.
+ 0, 0, 0x4, 0x57, // SSRC
+ // Empty rtp payload.
+ };
+ // clang-format on
+ RtpPacketReceived packet;
+ // Manually convert to CopyOnWriteBuffer to be sure that capacity == size,
+ // so that the asan bot can catch read buffer overflows.
+ EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(data)));
+ rtp_video_stream_receiver_->StartReceive();
+ rtp_video_stream_receiver_->OnRtpPacket(packet);
+ // Expect asan doesn't find anything.
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrameBitstreamError) {
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ constexpr uint8_t expected_bitsteam[] = {1, 2, 3, 0xff};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ expected_bitsteam, sizeof(expected_bitsteam));
+ EXPECT_CALL(mock_on_complete_frame_callback_,
+ DoOnCompleteFrameFailBitstream(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+class RtpVideoStreamReceiver2TestH264
+ : public RtpVideoStreamReceiver2Test,
+ public ::testing::WithParamInterface<std::string> {
+ protected:
+ RtpVideoStreamReceiver2TestH264() : RtpVideoStreamReceiver2Test(GetParam()) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
+ RtpVideoStreamReceiver2TestH264,
+ Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) {
+ rtc::CopyOnWriteBuffer sps_data;
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
+ AddSps(&sps_video_header, 0, &sps_data);
+ rtp_packet.SetSequenceNumber(0);
+ rtp_packet.SetPayloadType(kPayloadType);
+ sps_video_header.is_first_packet_in_frame = true;
+ sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
+ sps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
+ sps_video_header);
+
+ rtc::CopyOnWriteBuffer pps_data;
+ RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
+ AddPps(&pps_video_header, 0, 1, &pps_data);
+ rtp_packet.SetSequenceNumber(1);
+ pps_video_header.is_first_packet_in_frame = true;
+ pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
+ pps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
+ pps_video_header);
+
+ rtc::CopyOnWriteBuffer idr_data;
+ RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
+ AddIdr(&idr_video_header, 1);
+ rtp_packet.SetSequenceNumber(2);
+ idr_video_header.is_first_packet_in_frame = true;
+ idr_video_header.is_last_packet_in_frame = true;
+ idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ const uint8_t idr[] = {0x65, 1, 2, 3};
+ idr_data.AppendData(idr);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
+ idr_data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
+ idr_video_header);
+}
+
+TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
+ constexpr int kPayloadType = 99;
+ std::map<std::string, std::string> codec_params;
+ // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2
+ codec_params.insert(
+ {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
+ rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecH264,
+ codec_params,
+ /*raw_payload=*/false);
+ const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
+ 0x53, 0x05, 0x89, 0x88};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
+ sizeof(binary_sps));
+ const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
+ sizeof(binary_pps));
+
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader video_header = GetDefaultH264VideoHeader();
+ AddIdr(&video_header, 0);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(2);
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecH264;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ rtc::CopyOnWriteBuffer data({'1', '2', '3'});
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
+ constexpr int kPayloadType = 99;
+ std::map<std::string, std::string> codec_params;
+ // Forcing can be done either with the field trial or with codec_params.
+ if (GetParam() == "") {
+ codec_params.insert({cricket::kH264FmtpSpsPpsIdrInKeyframe, ""});
+ }
+ rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecH264,
+ codec_params,
+ /*raw_payload=*/false);
+ rtc::CopyOnWriteBuffer sps_data;
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
+ AddSps(&sps_video_header, 0, &sps_data);
+ rtp_packet.SetSequenceNumber(0);
+ rtp_packet.SetPayloadType(kPayloadType);
+ sps_video_header.is_first_packet_in_frame = true;
+ sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
+ sps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
+ sps_video_header);
+
+ rtc::CopyOnWriteBuffer pps_data;
+ RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
+ AddPps(&pps_video_header, 0, 1, &pps_data);
+ rtp_packet.SetSequenceNumber(1);
+ pps_video_header.is_first_packet_in_frame = true;
+ pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
+ pps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
+ pps_video_header);
+
+ rtc::CopyOnWriteBuffer idr_data;
+ RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
+ AddIdr(&idr_video_header, 1);
+ rtp_packet.SetSequenceNumber(2);
+ idr_video_header.is_first_packet_in_frame = true;
+ idr_video_header.is_last_packet_in_frame = true;
+ idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ const uint8_t idr[] = {0x65, 1, 2, 3};
+ idr_data.AppendData(idr);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
+ idr_data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(
+ [&](EncodedFrame* frame) { EXPECT_TRUE(frame->is_keyframe()); });
+ rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
+ idr_video_header);
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
+ idr_data.size());
+ rtp_packet.SetSequenceNumber(3);
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(
+ [&](EncodedFrame* frame) { EXPECT_FALSE(frame->is_keyframe()); });
+ rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
+ idr_video_header);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader video_header = GetDefaultH264VideoHeader();
+ rtc::CopyOnWriteBuffer data({'1', '2', '3'});
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(2);
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ rtp_packet.SetSequenceNumber(3);
+ rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
+ video_header);
+
+ rtp_packet.SetSequenceNumber(4);
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ rtp_packet.SetSequenceNumber(6);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_packet.SetSequenceNumber(5);
+ rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
+ video_header);
+}
+
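+// A worked view of the padding test above (sequence number -> content ->
+// result):
+//
+//   seq 2: key frame payload   -> frame delivered
+//   seq 3: empty padding       -> consumes the sequence number only
+//   seq 4: delta frame payload -> frame delivered (2..4 contiguous)
+//   seq 6: delta frame payload -> held back, seq 5 still missing
+//   seq 5: empty padding       -> closes the gap, frame at seq 6 delivered
+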
+TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
+
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) {
+ constexpr int kPacketBufferMaxSize = 2048;
+
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
+ // Incomplete frames so that the packet buffer is filling up.
+ video_header.is_last_packet_in_frame = false;
+ uint16_t start_sequence_number = 1234;
+ rtp_packet.SetSequenceNumber(start_sequence_number);
+ while (rtp_packet.SequenceNumber() - start_sequence_number <
+ kPacketBufferMaxSize) {
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+ rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
+ }
+
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
+}
+
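+// Worked numbers for the test above: packets use every second sequence
+// number starting at 1234, so the loop inserts 1024 incomplete packets
+// spanning a range of kPacketBufferMaxSize = 2048 sequence numbers. The
+// final insertion wraps back onto the start of that range, the packet
+// buffer cannot accommodate it, and exactly one PLI (keyframe request) is
+// expected.
+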
+TEST_F(RtpVideoStreamReceiver2Test, SinkGetsRtpNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink test_sink;
+ test_packet_sink_ = &test_sink;
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(test_sink, OnRtpPacket(SamePacketAs(*rtp_packet)));
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+ test_packet_sink_ = nullptr;
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, NonStartedStreamGetsNoRtpCallbacks) {
+ // Explicitly showing that the stream is not in the `started` state,
+ // regardless of whether streams start out `started` or `stopped`.
+ rtp_video_stream_receiver_->StopReceive();
+
+ MockRtpPacketSink test_sink;
+ test_packet_sink_ = &test_sink;
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(test_sink, OnRtpPacket(_)).Times(0);
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ test_packet_sink_ = nullptr;
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kSpatialIndex = 1;
+
+ rtp_video_stream_receiver_->StartReceive();
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+ RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ generic_descriptor.SetFrameId(100);
+ generic_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
+ generic_descriptor.AddFrameDependencyDiff(90);
+ generic_descriptor.AddFrameDependencyDiff(80);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ memcpy(payload, data.data(), data.size());
+ // The first byte is the header, so we ignore the first byte of `data`.
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
+ data.size() - 1);
+
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(1);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) {
+ EXPECT_EQ(frame->num_references, 2U);
+ EXPECT_EQ(frame->references[0], frame->Id() - 90);
+ EXPECT_EQ(frame->references[1], frame->Id() - 80);
+ EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex);
+ EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
+ }));
+
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kSpatialIndex = 1;
+
+ rtp_video_stream_receiver_->StartReceive();
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+ RtpPacketReceived first_packet(&extension_map);
+
+ RtpGenericFrameDescriptor first_packet_descriptor;
+ first_packet_descriptor.SetFirstPacketInSubFrame(true);
+ first_packet_descriptor.SetLastPacketInSubFrame(false);
+ first_packet_descriptor.SetFrameId(100);
+ first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
+ first_packet_descriptor.SetResolution(480, 360);
+ ASSERT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ first_packet_descriptor));
+
+ uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
+ memcpy(first_packet_payload, data.data(), data.size());
+ // The first byte is the header, so we ignore the first byte of `data`.
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
+ data.size() - 1);
+
+ first_packet.SetPayloadType(kPayloadType);
+ first_packet.SetSequenceNumber(1);
+ rtp_video_stream_receiver_->OnRtpPacket(first_packet);
+
+ RtpPacketReceived second_packet(&extension_map);
+ RtpGenericFrameDescriptor second_packet_descriptor;
+ second_packet_descriptor.SetFirstPacketInSubFrame(false);
+ second_packet_descriptor.SetLastPacketInSubFrame(true);
+ ASSERT_TRUE(second_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ second_packet_descriptor));
+
+ second_packet.SetMarker(true);
+ second_packet.SetPayloadType(kPayloadType);
+ second_packet.SetSequenceNumber(2);
+
+ uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
+ memcpy(second_packet_payload, data.data(), data.size());
+ // The first byte is the header, so we ignore the first byte of `data`.
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
+ data.size() - 1);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) {
+ EXPECT_EQ(frame->num_references, 0U);
+ EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex);
+ EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
+ EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
+ EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
+ }));
+
+ rtp_video_stream_receiver_->OnRtpPacket(second_packet);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorRawPayload) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kRawPayloadType = 123;
+
+ rtp_video_stream_receiver_->AddReceiveCodec(kRawPayloadType,
+ kVideoCodecGeneric, {},
+ /*raw_payload=*/true);
+ rtp_video_stream_receiver_->StartReceive();
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+ RtpPacketReceived rtp_packet(&extension_map);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kRawPayloadType);
+ rtp_packet.SetSequenceNumber(1);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+}
+
+TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kPayloadType = 123;
+
+ rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecGeneric,
+ {},
+ /*raw_payload=*/true);
+ rtp_video_stream_receiver_->StartReceive();
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+
+ uint16_t rtp_sequence_number = 1;
+ auto inject_packet = [&](uint16_t wrapped_frame_id) {
+ RtpPacketReceived rtp_packet(&extension_map);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ generic_descriptor.SetFrameId(wrapped_frame_id);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ ASSERT_TRUE(payload);
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(++rtp_sequence_number);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+ };
+
+ int64_t first_picture_id;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); });
+ inject_packet(/*wrapped_frame_id=*/0xffff);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](EncodedFrame* frame) {
+ EXPECT_EQ(frame->Id() - first_picture_id, 3);
+ });
+ inject_packet(/*wrapped_frame_id=*/0x0002);
+}
+
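+// A minimal sketch (not the receiver's actual unwrapper) of the 16-bit
+// unwrap arithmetic the UnwrapsFrameId test relies on: differences are
+// reduced modulo 2^16 and mapped onto the shortest signed distance, so
+// stepping from 0xffff to 0x0002 is +3 rather than -65533:
+//
+//   uint16_t prev = 0xffff, next = 0x0002;
+//   int16_t step = static_cast<int16_t>(next - prev);  // +3
+//   int64_t unwrapped = last_unwrapped + step;
+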
+class RtpVideoStreamReceiver2DependencyDescriptorTest
+ : public RtpVideoStreamReceiver2Test {
+ public:
+ RtpVideoStreamReceiver2DependencyDescriptorTest() {
+ rtp_video_stream_receiver_->AddReceiveCodec(payload_type_,
+ kVideoCodecGeneric, {},
+ /*raw_payload=*/true);
+ extension_map_.Register<RtpDependencyDescriptorExtension>(7);
+ rtp_video_stream_receiver_->StartReceive();
+ }
+
+  // Returns a valid structure for the DependencyDescriptors.
+  // The first template of that structure always fits a key frame.
+ static FrameDependencyStructure CreateStreamStructure() {
+ FrameDependencyStructure stream_structure;
+ stream_structure.num_decode_targets = 1;
+ stream_structure.templates = {
+ FrameDependencyTemplate().Dtis("S"),
+ FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
+ };
+ return stream_structure;
+ }
+
+ void InjectPacketWith(const FrameDependencyStructure& stream_structure,
+ const DependencyDescriptor& dependency_descriptor) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ RtpPacketReceived rtp_packet(&extension_map_);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
+ stream_structure, dependency_descriptor));
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ ASSERT_TRUE(payload);
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(payload_type_);
+ rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+ }
+
+ private:
+ const int payload_type_ = 123;
+ RtpHeaderExtensionMap extension_map_;
+ uint16_t rtp_sequence_number_ = 321;
+};
+
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) {
+ FrameDependencyStructure stream_structure = CreateStreamStructure();
+
+ DependencyDescriptor keyframe_descriptor;
+ keyframe_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure);
+ keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
+ keyframe_descriptor.frame_number = 0xfff0;
+  // DependencyDescriptor doesn't support reordering a delta frame before the
+  // keyframe. Thus feed a key frame first, then test reordered delta frames.
+ int64_t first_picture_id;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); });
+ InjectPacketWith(stream_structure, keyframe_descriptor);
+
+ DependencyDescriptor deltaframe1_descriptor;
+ deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
+ deltaframe1_descriptor.frame_number = 0xfffe;
+
+ DependencyDescriptor deltaframe2_descriptor;
+  deltaframe2_descriptor.frame_dependencies = stream_structure.templates[1];
+ deltaframe2_descriptor.frame_number = 0x0002;
+
+ // Parser should unwrap frame ids correctly even if packets were reordered by
+ // the network.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](EncodedFrame* frame) {
+ // 0x0002 - 0xfff0
+ EXPECT_EQ(frame->Id() - first_picture_id, 18);
+ })
+ .WillOnce([&](EncodedFrame* frame) {
+ // 0xfffe - 0xfff0
+ EXPECT_EQ(frame->Id() - first_picture_id, 14);
+ });
+ InjectPacketWith(stream_structure, deltaframe2_descriptor);
+ InjectPacketWith(stream_structure, deltaframe1_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
+ DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
+ FrameDependencyStructure stream_structure1 = CreateStreamStructure();
+ FrameDependencyStructure stream_structure2 = CreateStreamStructure();
+ // Make sure template ids for these two structures do not collide:
+ // adjust structure_id (that is also used as template id offset).
+ stream_structure1.structure_id = 13;
+ stream_structure2.structure_id =
+ stream_structure1.structure_id + stream_structure1.templates.size();
+
+ DependencyDescriptor keyframe1_descriptor;
+ keyframe1_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure1);
+ keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
+ keyframe1_descriptor.frame_number = 1;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ InjectPacketWith(stream_structure1, keyframe1_descriptor);
+
+ // Pass in 2nd key frame with different structure.
+ DependencyDescriptor keyframe2_descriptor;
+ keyframe2_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure2);
+ keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
+ keyframe2_descriptor.frame_number = 3;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ InjectPacketWith(stream_structure2, keyframe2_descriptor);
+
+ // Pass in late delta frame that uses structure of the 1st key frame.
+ DependencyDescriptor deltaframe_descriptor;
+ deltaframe_descriptor.frame_dependencies = stream_structure1.templates[0];
+ deltaframe_descriptor.frame_number = 2;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
+ InjectPacketWith(stream_structure1, deltaframe_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
+ DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
+ FrameDependencyStructure stream_structure1 = CreateStreamStructure();
+ FrameDependencyStructure stream_structure2 = CreateStreamStructure();
+ // Make sure template ids for these two structures do not collide:
+ // adjust structure_id (that is also used as template id offset).
+ stream_structure1.structure_id = 13;
+ stream_structure2.structure_id =
+ stream_structure1.structure_id + stream_structure1.templates.size();
+
+ DependencyDescriptor keyframe1_descriptor;
+ keyframe1_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure1);
+ keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
+ keyframe1_descriptor.frame_number = 1;
+
+ DependencyDescriptor keyframe2_descriptor;
+ keyframe2_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure2);
+ keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
+ keyframe2_descriptor.frame_number = 3;
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(
+ [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 3); });
+ InjectPacketWith(stream_structure2, keyframe2_descriptor);
+ InjectPacketWith(stream_structure1, keyframe1_descriptor);
+
+ // Pass in delta frame that uses structure of the 2nd key frame. Late key
+ // frame shouldn't block it.
+ DependencyDescriptor deltaframe_descriptor;
+ deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0];
+ deltaframe_descriptor.frame_number = 4;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(
+ [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 4); });
+ InjectPacketWith(stream_structure2, deltaframe_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
+ RequestKeyframeIfInitialKeyframePacketIsLost) {
+ FrameDependencyStructure stream_structure = CreateStreamStructure();
+
+ DependencyDescriptor keyframe_descriptor_without_structure;
+ keyframe_descriptor_without_structure.frame_dependencies =
+ stream_structure.templates[0];
+ keyframe_descriptor_without_structure.frame_number = 0;
+
+ InjectPacketWith(stream_structure, keyframe_descriptor_without_structure);
+
+  // Not enough time has passed since the last keyframe request, so no new
+  // PLI should be sent.
+ time_controller_.AdvanceTime(TimeDelta::Millis(500));
+ InjectPacketWith(stream_structure, keyframe_descriptor_without_structure);
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(501));
+ InjectPacketWith(stream_structure, keyframe_descriptor_without_structure);
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(2));
+}
+
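+// The timing in the test above implies PLI throttling of roughly one second:
+// a request with the first packet, a suppressed retry 500 ms later, and a
+// fresh request once just over a second has elapsed. The exact interval is
+// an assumption read off the test, not a documented constant.
+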
+TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
+ rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ rtc::make_ref_counted<testing::NiceMock<MockFrameTransformer>>();
+ EXPECT_CALL(*mock_frame_transformer,
+ RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
+ auto receiver = std::make_unique<RtpVideoStreamReceiver2>(
+ TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
+ nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
+ nullptr, &nack_periodic_processor_, &mock_on_complete_frame_callback_,
+ nullptr, mock_frame_transformer, field_trials_, nullptr);
+ receiver->AddReceiveCodec(kPayloadType, kVideoCodecGeneric, {},
+ /*raw_payload=*/false);
+
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({'1', '2', '3', '4'});
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(*mock_frame_transformer, Transform(_));
+ receiver->OnReceivedPayloadData(data, rtp_packet, video_header);
+
+ EXPECT_CALL(*mock_frame_transformer,
+ UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
+ receiver = nullptr;
+}
+
+// Test default behavior and when playout delay is overridden by field trial.
+const VideoPlayoutDelay kTransmittedPlayoutDelay = {100, 200};
+const VideoPlayoutDelay kForcedPlayoutDelay = {70, 90};
+struct PlayoutDelayOptions {
+ std::string field_trial;
+ VideoPlayoutDelay expected_delay;
+};
+const PlayoutDelayOptions kDefaultBehavior = {
+ /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay};
+const PlayoutDelayOptions kOverridePlayoutDelay = {
+ /*field_trial=*/"WebRTC-ForcePlayoutDelay/min_ms:70,max_ms:90/",
+ /*expected_delay=*/kForcedPlayoutDelay};
+
+class RtpVideoStreamReceiver2TestPlayoutDelay
+ : public RtpVideoStreamReceiver2Test,
+ public ::testing::WithParamInterface<PlayoutDelayOptions> {
+ protected:
+ RtpVideoStreamReceiver2TestPlayoutDelay()
+ : RtpVideoStreamReceiver2Test(GetParam().field_trial) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(PlayoutDelay,
+ RtpVideoStreamReceiver2TestPlayoutDelay,
+ Values(kDefaultBehavior, kOverridePlayoutDelay));
+
+TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) {
+ rtc::CopyOnWriteBuffer payload_data({'1', '2', '3', '4'});
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<PlayoutDelayLimits>(1);
+ RtpPacketToSend packet_to_send(&extension_map);
+ packet_to_send.SetPayloadType(kPayloadType);
+ packet_to_send.SetSequenceNumber(1);
+
+ // Set playout delay on outgoing packet.
+ EXPECT_TRUE(packet_to_send.SetExtension<PlayoutDelayLimits>(
+ kTransmittedPlayoutDelay));
+ uint8_t* payload = packet_to_send.AllocatePayload(payload_data.size());
+ memcpy(payload, payload_data.data(), payload_data.size());
+
+ RtpPacketReceived received_packet(&extension_map);
+ received_packet.Parse(packet_to_send.data(), packet_to_send.size());
+
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(payload_data.data(),
+ payload_data.size());
+  // Expect the playout delay of the encoded frame to match the transmitted
+  // playout delay unless it was overridden by a field trial.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([expected_playout_delay =
+ GetParam().expected_delay](EncodedFrame* frame) {
+ EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay);
+ }));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(
+ received_packet.PayloadBuffer(), received_packet, video_header);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc b/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
new file mode 100644
index 0000000000..b1907fa7a2
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+namespace {
+class TransformableVideoReceiverFrame
+ : public TransformableVideoFrameInterface {
+ public:
+ TransformableVideoReceiverFrame(std::unique_ptr<RtpFrameObject> frame,
+ uint32_t ssrc)
+ : frame_(std::move(frame)),
+ metadata_(frame_->GetRtpVideoHeader().GetAsMetadata()),
+ ssrc_(ssrc) {}
+ ~TransformableVideoReceiverFrame() override = default;
+
+ // Implements TransformableVideoFrameInterface.
+ rtc::ArrayView<const uint8_t> GetData() const override {
+ return *frame_->GetEncodedData();
+ }
+
+ void SetData(rtc::ArrayView<const uint8_t> data) override {
+ frame_->SetEncodedData(
+ EncodedImageBuffer::Create(data.data(), data.size()));
+ }
+
+ uint8_t GetPayloadType() const override { return frame_->PayloadType(); }
+ uint32_t GetSsrc() const override { return ssrc_; }
+ uint32_t GetTimestamp() const override { return frame_->Timestamp(); }
+
+ bool IsKeyFrame() const override {
+ return frame_->FrameType() == VideoFrameType::kVideoFrameKey;
+ }
+
+ std::vector<uint8_t> GetAdditionalData() const override {
+ return RtpDescriptorAuthentication(frame_->GetRtpVideoHeader());
+ }
+
+ const VideoFrameMetadata& GetMetadata() const override { return metadata_; }
+ void SetMetadata(const VideoFrameMetadata&) override {
+ RTC_DCHECK_NOTREACHED()
+ << "TransformableVideoReceiverFrame::SetMetadata is not implemented";
+ }
+
+ std::unique_ptr<RtpFrameObject> ExtractFrame() && {
+ return std::move(frame_);
+ }
+
+ Direction GetDirection() const override { return Direction::kReceiver; }
+
+ private:
+ std::unique_ptr<RtpFrameObject> frame_;
+ const VideoFrameMetadata metadata_;
+ const uint32_t ssrc_;
+};
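+
+// ExtractFrame() above is rvalue-ref-qualified (the trailing `&&`), so the
+// wrapped RtpFrameObject can only be taken out of an expiring object, e.g.
+// (a sketch mirroring ManageFrame() below):
+//
+//   std::unique_ptr<RtpFrameObject> rtp_frame =
+//       std::move(*transformed_frame).ExtractFrame();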
+} // namespace
+
+RtpVideoStreamReceiverFrameTransformerDelegate::
+ RtpVideoStreamReceiverFrameTransformerDelegate(
+ RtpVideoFrameReceiver* receiver,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ rtc::Thread* network_thread,
+ uint32_t ssrc)
+ : receiver_(receiver),
+ frame_transformer_(std::move(frame_transformer)),
+ network_thread_(network_thread),
+ ssrc_(ssrc) {}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::Init() {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ frame_transformer_->RegisterTransformedFrameSinkCallback(
+ rtc::scoped_refptr<TransformedFrameCallback>(this), ssrc_);
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::Reset() {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ frame_transformer_->UnregisterTransformedFrameSinkCallback(ssrc_);
+ frame_transformer_ = nullptr;
+ receiver_ = nullptr;
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::TransformFrame(
+ std::unique_ptr<RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ frame_transformer_->Transform(
+ std::make_unique<TransformableVideoReceiverFrame>(std::move(frame),
+ ssrc_));
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::OnTransformedFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) {
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
+ this);
+ network_thread_->PostTask(
+ [delegate = std::move(delegate), frame = std::move(frame)]() mutable {
+ delegate->ManageFrame(std::move(frame));
+ });
+}
+
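+// A note on the capture pattern above: constructing a scoped_refptr from
+// `this` before PostTask keeps the delegate alive until the lambda has run
+// on the network thread, even if the caller drops its reference in the
+// meantime; moving the refptr into the lambda transfers that extra
+// reference.
+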
+void RtpVideoStreamReceiverFrameTransformerDelegate::ManageFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ RTC_CHECK_EQ(frame->GetDirection(),
+ TransformableFrameInterface::Direction::kReceiver);
+ if (!receiver_)
+ return;
+ auto transformed_frame = absl::WrapUnique(
+ static_cast<TransformableVideoReceiverFrame*>(frame.release()));
+ receiver_->ManageFrame(std::move(*transformed_frame).ExtractFrame());
+}
+
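+// The static_cast in ManageFrame() is a downcast that is safe only because
+// the RTC_CHECK_EQ above guarantees the frame travelled the receive path,
+// i.e. it was created as a TransformableVideoReceiverFrame by this delegate.
+// absl::WrapUnique then re-adopts the released pointer as the derived type.
+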
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h b/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
new file mode 100644
index 0000000000..e2472fadb5
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER_FRAME_TRANSFORMER_DELEGATE_H_
+#define VIDEO_RTP_VIDEO_STREAM_RECEIVER_FRAME_TRANSFORMER_DELEGATE_H_
+
+#include <memory>
+
+#include "api/frame_transformer_interface.h"
+#include "api/sequence_checker.h"
+#include "modules/video_coding/frame_object.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+// Called back by RtpVideoStreamReceiverFrameTransformerDelegate on the network
+// thread after transformation.
+class RtpVideoFrameReceiver {
+ public:
+ virtual void ManageFrame(std::unique_ptr<RtpFrameObject> frame) = 0;
+
+ protected:
+ virtual ~RtpVideoFrameReceiver() = default;
+};
+
+// Delegates calls to FrameTransformerInterface to transform frames, and to
+// RtpVideoStreamReceiver to manage transformed frames on the `network_thread_`.
+class RtpVideoStreamReceiverFrameTransformerDelegate
+ : public TransformedFrameCallback {
+ public:
+ RtpVideoStreamReceiverFrameTransformerDelegate(
+ RtpVideoFrameReceiver* receiver,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ rtc::Thread* network_thread,
+ uint32_t ssrc);
+
+ void Init();
+ void Reset();
+
+ // Delegates the call to FrameTransformerInterface::TransformFrame.
+ void TransformFrame(std::unique_ptr<RtpFrameObject> frame);
+
+ // Implements TransformedFrameCallback. Can be called on any thread. Posts
+ // the transformed frame to be managed on the `network_thread_`.
+ void OnTransformedFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) override;
+
+ // Delegates the call to RtpVideoFrameReceiver::ManageFrame on the
+ // `network_thread_`.
+ void ManageFrame(std::unique_ptr<TransformableFrameInterface> frame);
+
+ protected:
+ ~RtpVideoStreamReceiverFrameTransformerDelegate() override = default;
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker network_sequence_checker_;
+ RtpVideoFrameReceiver* receiver_ RTC_GUARDED_BY(network_sequence_checker_);
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer_
+ RTC_GUARDED_BY(network_sequence_checker_);
+ rtc::Thread* const network_thread_;
+ const uint32_t ssrc_;
+};
+
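+// Typical lifetime, as a sketch (receiver, transformer, and thread names
+// are illustrative, not part of this header):
+//
+//   auto delegate = rtc::make_ref_counted<
+//       RtpVideoStreamReceiverFrameTransformerDelegate>(
+//       &receiver, transformer, network_thread, remote_ssrc);
+//   delegate->Init();             // Registers the transformed-frame sink.
+//   delegate->TransformFrame(std::move(rtp_frame));
+//   ...                           // OnTransformedFrame() posts results back.
+//   delegate->Reset();            // Unregisters and drops references.
+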
+} // namespace webrtc
+
+#endif // VIDEO_RTP_VIDEO_STREAM_RECEIVER_FRAME_TRANSFORMER_DELEGATE_H_
diff --git a/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc b/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
new file mode 100644
index 0000000000..e757fa20ac
--- /dev/null
+++ b/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
+
+#include <cstdio>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "api/call/transport.h"
+#include "call/video_receive_stream.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "rtc_base/event.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_frame_transformer.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::NiceMock;
+using ::testing::SaveArg;
+
+std::unique_ptr<RtpFrameObject> CreateRtpFrameObject(
+ const RTPVideoHeader& video_header) {
+ return std::make_unique<RtpFrameObject>(
+ 0, 0, true, 0, 0, 0, 0, 0, VideoSendTiming(), 0, video_header.codec,
+ kVideoRotation_0, VideoContentType::UNSPECIFIED, video_header,
+ absl::nullopt, RtpPacketInfos(), EncodedImageBuffer::Create(0));
+}
+
+std::unique_ptr<RtpFrameObject> CreateRtpFrameObject() {
+ return CreateRtpFrameObject(RTPVideoHeader());
+}
+
+class TestRtpVideoFrameReceiver : public RtpVideoFrameReceiver {
+ public:
+ TestRtpVideoFrameReceiver() {}
+ ~TestRtpVideoFrameReceiver() override = default;
+
+ MOCK_METHOD(void,
+ ManageFrame,
+ (std::unique_ptr<RtpFrameObject> frame),
+ (override));
+};
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ RegisterTransformedFrameCallbackSinkOnInit) {
+ TestRtpVideoFrameReceiver receiver;
+ auto frame_transformer(rtc::make_ref_counted<MockFrameTransformer>());
+ auto delegate(
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, rtc::Thread::Current(),
+          /*remote_ssrc=*/1111));
+ EXPECT_CALL(*frame_transformer,
+ RegisterTransformedFrameSinkCallback(testing::_, 1111));
+ delegate->Init();
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ UnregisterTransformedFrameSinkCallbackOnReset) {
+ TestRtpVideoFrameReceiver receiver;
+ auto frame_transformer(rtc::make_ref_counted<MockFrameTransformer>());
+ auto delegate(
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, rtc::Thread::Current(),
+          /*remote_ssrc=*/1111));
+ EXPECT_CALL(*frame_transformer, UnregisterTransformedFrameSinkCallback(1111));
+ delegate->Reset();
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformFrame) {
+ TestRtpVideoFrameReceiver receiver;
+ auto frame_transformer(
+ rtc::make_ref_counted<testing::NiceMock<MockFrameTransformer>>());
+ auto delegate(
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, rtc::Thread::Current(),
+          /*remote_ssrc=*/1111));
+ auto frame = CreateRtpFrameObject();
+ EXPECT_CALL(*frame_transformer, Transform);
+ delegate->TransformFrame(std::move(frame));
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ ManageFrameOnTransformedFrame) {
+  rtc::AutoThread main_thread;
+ TestRtpVideoFrameReceiver receiver;
+ auto mock_frame_transformer(
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>());
+ auto delegate =
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, mock_frame_transformer, rtc::Thread::Current(),
+          /*remote_ssrc=*/1111);
+
+ rtc::scoped_refptr<TransformedFrameCallback> callback;
+ EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback)
+ .WillOnce(SaveArg<0>(&callback));
+ delegate->Init();
+ ASSERT_TRUE(callback);
+
+ EXPECT_CALL(receiver, ManageFrame);
+ ON_CALL(*mock_frame_transformer, Transform)
+ .WillByDefault(
+ [&callback](std::unique_ptr<TransformableFrameInterface> frame) {
+ callback->OnTransformedFrame(std::move(frame));
+ });
+ delegate->TransformFrame(CreateRtpFrameObject());
+ rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ TransformableFrameMetadataHasCorrectValue) {
+ TestRtpVideoFrameReceiver receiver;
+ auto mock_frame_transformer =
+ rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+ auto delegate =
+ rtc::make_ref_counted<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, mock_frame_transformer, rtc::Thread::Current(), 1111);
+ delegate->Init();
+ RTPVideoHeader video_header;
+ video_header.width = 1280u;
+ video_header.height = 720u;
+ RTPVideoHeader::GenericDescriptorInfo& generic =
+ video_header.generic.emplace();
+ generic.frame_id = 10;
+ generic.temporal_index = 3;
+ generic.spatial_index = 2;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ generic.dependencies = {5};
+
+ // Check that the transformable frame passed to the frame transformer has the
+ // correct metadata.
+ EXPECT_CALL(*mock_frame_transformer, Transform)
+ .WillOnce(
+ [](std::unique_ptr<TransformableFrameInterface> transformable_frame) {
+ auto frame =
+ absl::WrapUnique(static_cast<TransformableVideoFrameInterface*>(
+ transformable_frame.release()));
+ ASSERT_TRUE(frame);
+ auto metadata = frame->GetMetadata();
+ EXPECT_EQ(metadata.GetWidth(), 1280u);
+ EXPECT_EQ(metadata.GetHeight(), 720u);
+ EXPECT_EQ(metadata.GetFrameId(), 10);
+ EXPECT_EQ(metadata.GetTemporalIndex(), 3);
+ EXPECT_EQ(metadata.GetSpatialIndex(), 2);
+ EXPECT_THAT(metadata.GetFrameDependencies(), ElementsAre(5));
+ EXPECT_THAT(metadata.GetDecodeTargetIndications(),
+ ElementsAre(DecodeTargetIndication::kSwitch));
+ });
+ // The delegate creates a transformable frame from the RtpFrameObject.
+ delegate->TransformFrame(CreateRtpFrameObject(video_header));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/screenshare_loopback.cc b/third_party/libwebrtc/video/screenshare_loopback.cc
new file mode 100644
index 0000000000..239e472f6e
--- /dev/null
+++ b/third_party/libwebrtc/video/screenshare_loopback.cc
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/types/optional.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video_quality_test_fixture.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_encode.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/run_test.h"
+#include "video/video_quality_test.h"
+
+using ::webrtc::BitrateConstraints;
+using ::webrtc::BuiltInNetworkBehaviorConfig;
+using ::webrtc::InterLayerPredMode;
+using ::webrtc::SdpVideoFormat;
+using ::webrtc::VideoQualityTest;
+
+// Flags common with video loopback, with different default values.
+ABSL_FLAG(int, width, 1850, "Video width (crops source).");
+size_t Width() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_width));
+}
+
+ABSL_FLAG(int, height, 1110, "Video height (crops source).");
+size_t Height() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_height));
+}
+
+ABSL_FLAG(int, fps, 5, "Frames per second.");
+int Fps() {
+ return absl::GetFlag(FLAGS_fps);
+}
+
+ABSL_FLAG(int, min_bitrate, 50, "Call and stream min bitrate in kbps.");
+int MinBitrateKbps() {
+ return absl::GetFlag(FLAGS_min_bitrate);
+}
+
+ABSL_FLAG(int, start_bitrate, 300, "Call start bitrate in kbps.");
+int StartBitrateKbps() {
+ return absl::GetFlag(FLAGS_start_bitrate);
+}
+
+ABSL_FLAG(int, target_bitrate, 200, "Stream target bitrate in kbps.");
+int TargetBitrateKbps() {
+ return absl::GetFlag(FLAGS_target_bitrate);
+}
+
+ABSL_FLAG(int, max_bitrate, 1000, "Call and stream max bitrate in kbps.");
+int MaxBitrateKbps() {
+ return absl::GetFlag(FLAGS_max_bitrate);
+}
+
+ABSL_FLAG(int, num_temporal_layers, 2, "Number of temporal layers to use.");
+int NumTemporalLayers() {
+ return absl::GetFlag(FLAGS_num_temporal_layers);
+}
+
+// Flags common with video loopback, with equal default values.
+ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");
+std::string Codec() {
+ return absl::GetFlag(FLAGS_codec);
+}
+
+ABSL_FLAG(std::string,
+ rtc_event_log_name,
+ "",
+ "Filename for rtc event log. Two files "
+ "with \"_send\" and \"_recv\" suffixes will be created.");
+std::string RtcEventLogName() {
+ return absl::GetFlag(FLAGS_rtc_event_log_name);
+}
+
+ABSL_FLAG(std::string,
+ rtp_dump_name,
+ "",
+ "Filename for dumped received RTP stream.");
+std::string RtpDumpName() {
+ return absl::GetFlag(FLAGS_rtp_dump_name);
+}
+
+ABSL_FLAG(int,
+ selected_tl,
+ -1,
+ "Temporal layer to show or analyze. -1 to disable filtering.");
+int SelectedTL() {
+ return absl::GetFlag(FLAGS_selected_tl);
+}
+
+ABSL_FLAG(
+ int,
+ duration,
+ 0,
+ "Duration of the test in seconds. If 0, rendered will be shown instead.");
+int DurationSecs() {
+ return absl::GetFlag(FLAGS_duration);
+}
+
+ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");
+std::string OutputFilename() {
+ return absl::GetFlag(FLAGS_output_filename);
+}
+
+ABSL_FLAG(std::string,
+ graph_title,
+ "",
+ "If empty, title will be generated automatically.");
+std::string GraphTitle() {
+ return absl::GetFlag(FLAGS_graph_title);
+}
+
+ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");
+int LossPercent() {
+ return absl::GetFlag(FLAGS_loss_percent);
+}
+
+ABSL_FLAG(int,
+ link_capacity,
+ 0,
+ "Capacity (kbps) of the fake link. 0 means infinite.");
+int LinkCapacityKbps() {
+ return absl::GetFlag(FLAGS_link_capacity);
+}
+
+ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");
+int QueueSize() {
+ return absl::GetFlag(FLAGS_queue_size);
+}
+
+ABSL_FLAG(int,
+ avg_propagation_delay_ms,
+ 0,
+ "Average link propagation delay in ms.");
+int AvgPropagationDelayMs() {
+ return absl::GetFlag(FLAGS_avg_propagation_delay_ms);
+}
+
+ABSL_FLAG(int,
+ std_propagation_delay_ms,
+ 0,
+ "Link propagation delay standard deviation in ms.");
+int StdPropagationDelayMs() {
+ return absl::GetFlag(FLAGS_std_propagation_delay_ms);
+}
+
+ABSL_FLAG(int, num_streams, 0, "Number of streams to show or analyze.");
+int NumStreams() {
+ return absl::GetFlag(FLAGS_num_streams);
+}
+
+ABSL_FLAG(int,
+ selected_stream,
+ 0,
+ "ID of the stream to show or analyze. "
+ "Set to the number of streams to show them all.");
+int SelectedStream() {
+ return absl::GetFlag(FLAGS_selected_stream);
+}
+
+ABSL_FLAG(int, num_spatial_layers, 1, "Number of spatial layers to use.");
+int NumSpatialLayers() {
+ return absl::GetFlag(FLAGS_num_spatial_layers);
+}
+
+ABSL_FLAG(int,
+ inter_layer_pred,
+ 0,
+ "Inter-layer prediction mode. "
+ "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");
+InterLayerPredMode InterLayerPred() {
+ if (absl::GetFlag(FLAGS_inter_layer_pred) == 0) {
+ return webrtc::InterLayerPredMode::kOn;
+ } else if (absl::GetFlag(FLAGS_inter_layer_pred) == 1) {
+ return webrtc::InterLayerPredMode::kOff;
+ } else {
+ RTC_DCHECK_EQ(absl::GetFlag(FLAGS_inter_layer_pred), 2);
+ return webrtc::InterLayerPredMode::kOnKeyPic;
+ }
+}
+
+ABSL_FLAG(int,
+ selected_sl,
+ -1,
+ "Spatial layer to show or analyze. -1 to disable filtering.");
+int SelectedSL() {
+ return absl::GetFlag(FLAGS_selected_sl);
+}
+
+ABSL_FLAG(std::string,
+ stream0,
+ "",
+ "Comma separated values describing VideoStream for stream #0.");
+std::string Stream0() {
+ return absl::GetFlag(FLAGS_stream0);
+}
+
+ABSL_FLAG(std::string,
+ stream1,
+ "",
+ "Comma separated values describing VideoStream for stream #1.");
+std::string Stream1() {
+ return absl::GetFlag(FLAGS_stream1);
+}
+
+ABSL_FLAG(std::string,
+ sl0,
+ "",
+ "Comma separated values describing SpatialLayer for layer #0.");
+std::string SL0() {
+ return absl::GetFlag(FLAGS_sl0);
+}
+
+ABSL_FLAG(std::string,
+ sl1,
+ "",
+ "Comma separated values describing SpatialLayer for layer #1.");
+std::string SL1() {
+ return absl::GetFlag(FLAGS_sl1);
+}
+
+ABSL_FLAG(std::string,
+ encoded_frame_path,
+ "",
+ "The base path for encoded frame logs. Created files will have "
+ "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
+std::string EncodedFramePath() {
+ return absl::GetFlag(FLAGS_encoded_frame_path);
+}
+
+ABSL_FLAG(bool, logs, false, "Print logs to stderr.");
+
+ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation.");
+
+ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");
+
+ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur.");
+
+ABSL_FLAG(
+ std::string,
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+ " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
+ "trials are separated by \"/\"");
+
+// Screenshare-specific flags.
+ABSL_FLAG(int,
+ min_transmit_bitrate,
+ 400,
+ "Min transmit bitrate incl. padding.");
+int MinTransmitBitrateKbps() {
+ return absl::GetFlag(FLAGS_min_transmit_bitrate);
+}
+
+ABSL_FLAG(bool,
+ generate_slides,
+ false,
+ "Whether to use randomly generated slides or read them from files.");
+bool GenerateSlides() {
+ return absl::GetFlag(FLAGS_generate_slides);
+}
+
+ABSL_FLAG(int,
+ slide_change_interval,
+ 10,
+ "Interval (in seconds) between simulated slide changes.");
+int SlideChangeInterval() {
+ return absl::GetFlag(FLAGS_slide_change_interval);
+}
+
+ABSL_FLAG(
+ int,
+ scroll_duration,
+ 0,
+ "Duration (in seconds) during which a slide will be scrolled into place.");
+int ScrollDuration() {
+ return absl::GetFlag(FLAGS_scroll_duration);
+}
+
+ABSL_FLAG(std::string,
+ slides,
+ "",
+ "Comma-separated list of *.yuv files to display as slides.");
+std::vector<std::string> Slides() {
+ std::vector<std::string> slides;
+ std::string slides_list = absl::GetFlag(FLAGS_slides);
+ rtc::tokenize(slides_list, ',', &slides);
+ return slides;
+}
+
+void Loopback() {
+ BuiltInNetworkBehaviorConfig pipe_config;
+ pipe_config.loss_percent = LossPercent();
+ pipe_config.link_capacity_kbps = LinkCapacityKbps();
+ pipe_config.queue_length_packets = QueueSize();
+ pipe_config.queue_delay_ms = AvgPropagationDelayMs();
+ pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
+ pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);
+
+ BitrateConstraints call_bitrate_config;
+ call_bitrate_config.min_bitrate_bps = MinBitrateKbps() * 1000;
+ call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
+ call_bitrate_config.max_bitrate_bps = -1; // Don't cap bandwidth estimate.
+
+ VideoQualityTest::Params params;
+ params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe);
+ params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor);
+ params.call.call_bitrate_config = call_bitrate_config;
+ params.video[0].enabled = true;
+ params.video[0].width = Width();
+ params.video[0].height = Height();
+ params.video[0].fps = Fps();
+ params.video[0].min_bitrate_bps = MinBitrateKbps() * 1000;
+ params.video[0].target_bitrate_bps = TargetBitrateKbps() * 1000;
+ params.video[0].max_bitrate_bps = MaxBitrateKbps() * 1000;
+ params.video[0].codec = Codec();
+ params.video[0].num_temporal_layers = NumTemporalLayers();
+ params.video[0].selected_tl = SelectedTL();
+ params.video[0].min_transmit_bps = MinTransmitBitrateKbps() * 1000;
+ params.screenshare[0].enabled = true;
+ params.screenshare[0].generate_slides = GenerateSlides();
+ params.screenshare[0].slide_change_interval = SlideChangeInterval();
+ params.screenshare[0].scroll_duration = ScrollDuration();
+ params.screenshare[0].slides = Slides();
+ params.config = pipe_config;
+ params.logging.rtc_event_log_name = RtcEventLogName();
+ params.logging.rtp_dump_name = RtpDumpName();
+ params.logging.encoded_frame_base_path = EncodedFramePath();
+
+ if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) {
+ params.ss[0].infer_streams = true;
+ }
+
+ std::vector<std::string> stream_descriptors;
+ stream_descriptors.push_back(Stream0());
+ stream_descriptors.push_back(Stream1());
+ std::vector<std::string> SL_descriptors;
+ SL_descriptors.push_back(SL0());
+ SL_descriptors.push_back(SL1());
+ VideoQualityTest::FillScalabilitySettings(
+ &params, 0, stream_descriptors, NumStreams(), SelectedStream(),
+ NumSpatialLayers(), SelectedSL(), InterLayerPred(), SL_descriptors);
+
+ auto fixture = std::make_unique<VideoQualityTest>(nullptr);
+ if (DurationSecs()) {
+ fixture->RunWithAnalyzer(params);
+ } else {
+ fixture->RunWithRenderers(params);
+ }
+}
+
+int main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ absl::ParseCommandLine(argc, argv);
+
+ rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));
+
+ // InitFieldTrialsFromString stores the char*, so the char array must outlive
+ // the application.
+ const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
+ webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());
+
+ webrtc::test::RunTest(Loopback);
+ return 0;
+}
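+
+// Example invocation (hypothetical values; every flag shown is defined
+// above):
+//
+//   ./screenshare_loopback --codec=VP9 --num_spatial_layers=2 --duration=60 \
+//       --output_filename=screenshare.graph --loss_percent=2 \
+//       --link_capacity=1500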
diff --git a/third_party/libwebrtc/video/send_delay_stats.cc b/third_party/libwebrtc/video/send_delay_stats.cc
new file mode 100644
index 0000000000..56c4164424
--- /dev/null
+++ b/third_party/libwebrtc/video/send_delay_stats.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_delay_stats.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+// Packets with a larger delay are removed and excluded from the delay stats.
+// Set larger than the max histogram delay, which is 10000 ms.
+const int64_t kMaxSentPacketDelayMs = 11000;
+const size_t kMaxPacketMapSize = 2000;
+
+// Limit for the maximum number of streams to calculate stats for.
+const size_t kMaxSsrcMapSize = 50;
+const int kMinRequiredPeriodicSamples = 5;
+} // namespace
+
+SendDelayStats::SendDelayStats(Clock* clock)
+ : clock_(clock), num_old_packets_(0), num_skipped_packets_(0) {}
+
+SendDelayStats::~SendDelayStats() {
+ if (num_old_packets_ > 0 || num_skipped_packets_ > 0) {
+ RTC_LOG(LS_WARNING) << "Delay stats: number of old packets "
+ << num_old_packets_ << ", skipped packets "
+ << num_skipped_packets_ << ". Number of streams "
+ << send_delay_counters_.size();
+ }
+ UpdateHistograms();
+}
+
+void SendDelayStats::UpdateHistograms() {
+ MutexLock lock(&mutex_);
+ for (const auto& it : send_delay_counters_) {
+ AggregatedStats stats = it.second->GetStats();
+ if (stats.num_samples >= kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.SendDelayInMs", stats.average);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.SendDelayInMs, " << stats.ToString();
+ }
+ }
+}
+
+void SendDelayStats::AddSsrcs(const VideoSendStream::Config& config) {
+ MutexLock lock(&mutex_);
+ if (ssrcs_.size() > kMaxSsrcMapSize)
+ return;
+ for (const auto& ssrc : config.rtp.ssrcs)
+ ssrcs_.insert(ssrc);
+}
+
+AvgCounter* SendDelayStats::GetSendDelayCounter(uint32_t ssrc) {
+  auto it = send_delay_counters_.find(ssrc);
+  if (it != send_delay_counters_.end())
+    return it->second.get();
+
+  auto counter = std::make_unique<AvgCounter>(clock_, nullptr, false);
+  AvgCounter* raw = counter.get();
+  send_delay_counters_[ssrc] = std::move(counter);
+  return raw;
+}
+
+void SendDelayStats::OnSendPacket(uint16_t packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) {
+ // Packet sent to transport.
+ MutexLock lock(&mutex_);
+ if (ssrcs_.find(ssrc) == ssrcs_.end())
+ return;
+
+ int64_t now = clock_->TimeInMilliseconds();
+ RemoveOld(now, &packets_);
+
+ if (packets_.size() > kMaxPacketMapSize) {
+ ++num_skipped_packets_;
+ return;
+ }
+ packets_.insert(
+ std::make_pair(packet_id, Packet(ssrc, capture_time_ms, now)));
+}
+
+bool SendDelayStats::OnSentPacket(int packet_id, int64_t time_ms) {
+ // Packet leaving socket.
+ if (packet_id == -1)
+ return false;
+
+ MutexLock lock(&mutex_);
+ auto it = packets_.find(packet_id);
+ if (it == packets_.end())
+ return false;
+
+ // TODO(asapersson): Remove SendSideDelayUpdated(), use capture -> sent.
+ // Elapsed time from send (to transport) -> sent (leaving socket).
+ int diff_ms = time_ms - it->second.send_time_ms;
+ GetSendDelayCounter(it->second.ssrc)->Add(diff_ms);
+ packets_.erase(it);
+ return true;
+}
+
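+// The measured quantity, spelled out: each tracked packet carries three
+// timestamps, and the histogram samples only the last leg.
+//
+//   capture_time_ms -> send_time_ms  (handed to transport, OnSendPacket)
+//   send_time_ms    -> time_ms       (left the socket, OnSentPacket)
+//   recorded delay  =  time_ms - send_time_ms
+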
+void SendDelayStats::RemoveOld(int64_t now, PacketMap* packets) {
+ while (!packets->empty()) {
+ auto it = packets->begin();
+ if (now - it->second.capture_time_ms < kMaxSentPacketDelayMs)
+ break;
+
+ packets->erase(it);
+ ++num_old_packets_;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/send_delay_stats.h b/third_party/libwebrtc/video/send_delay_stats.h
new file mode 100644
index 0000000000..fa76a1e39c
--- /dev/null
+++ b/third_party/libwebrtc/video/send_delay_stats.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_SEND_DELAY_STATS_H_
+#define VIDEO_SEND_DELAY_STATS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <set>
+
+#include "call/video_send_stream.h"
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/stats_counter.h"
+
+namespace webrtc {
+
+// Used to collect delay stats for video streams. The class gets callbacks
+// from more than one thread and internally uses a mutex for data access
+// synchronization.
+// TODO(bugs.webrtc.org/11993): OnSendPacket and OnSentPacket will eventually
+// be called consistently on the same thread. Once we're there, we should be
+// able to avoid locking (at least for the fast path).
+class SendDelayStats : public SendPacketObserver {
+ public:
+ explicit SendDelayStats(Clock* clock);
+ ~SendDelayStats() override;
+
+ // Adds the configured ssrcs for the rtp streams.
+ // Stats will be calculated for these streams.
+ void AddSsrcs(const VideoSendStream::Config& config);
+
+ // Called when a packet is sent (leaving socket).
+ bool OnSentPacket(int packet_id, int64_t time_ms);
+
+ protected:
+ // From SendPacketObserver.
+ // Called when a packet is sent to the transport.
+ void OnSendPacket(uint16_t packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) override;
+
+ private:
+ // Map holding sent packets (mapped by sequence number).
+ struct SequenceNumberOlderThan {
+ bool operator()(uint16_t seq1, uint16_t seq2) const {
+ return IsNewerSequenceNumber(seq2, seq1);
+ }
+ };
+ struct Packet {
+ Packet(uint32_t ssrc, int64_t capture_time_ms, int64_t send_time_ms)
+ : ssrc(ssrc),
+ capture_time_ms(capture_time_ms),
+ send_time_ms(send_time_ms) {}
+ uint32_t ssrc;
+ int64_t capture_time_ms;
+ int64_t send_time_ms;
+ };
+  using PacketMap = std::map<uint16_t, Packet, SequenceNumberOlderThan>;
+
+ void UpdateHistograms();
+ void RemoveOld(int64_t now, PacketMap* packets)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ AvgCounter* GetSendDelayCounter(uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* const clock_;
+ Mutex mutex_;
+
+ PacketMap packets_ RTC_GUARDED_BY(mutex_);
+ size_t num_old_packets_ RTC_GUARDED_BY(mutex_);
+ size_t num_skipped_packets_ RTC_GUARDED_BY(mutex_);
+
+ std::set<uint32_t> ssrcs_ RTC_GUARDED_BY(mutex_);
+
+ // Mapped by SSRC.
+ std::map<uint32_t, std::unique_ptr<AvgCounter>> send_delay_counters_
+ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+#endif // VIDEO_SEND_DELAY_STATS_H_
diff --git a/third_party/libwebrtc/video/send_delay_stats_unittest.cc b/third_party/libwebrtc/video/send_delay_stats_unittest.cc
new file mode 100644
index 0000000000..e7481f929e
--- /dev/null
+++ b/third_party/libwebrtc/video/send_delay_stats_unittest.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_delay_stats.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "call/rtp_config.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const uint32_t kSsrc1 = 17;
+const uint32_t kSsrc2 = 42;
+const uint32_t kRtxSsrc1 = 18;
+const uint32_t kRtxSsrc2 = 43;
+const uint16_t kPacketId = 2345;
+const int64_t kMaxPacketDelayMs = 11000;
+const int kMinRequiredPeriodicSamples = 5;
+const int kProcessIntervalMs = 2000;
+} // namespace
+
+class SendDelayStatsTest : public ::testing::Test {
+ public:
+ SendDelayStatsTest() : clock_(1234), config_(CreateConfig()) {}
+ virtual ~SendDelayStatsTest() {}
+
+ protected:
+ virtual void SetUp() {
+ stats_.reset(new SendDelayStats(&clock_));
+ stats_->AddSsrcs(config_);
+ }
+
+ VideoSendStream::Config CreateConfig() {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kSsrc1);
+ config.rtp.ssrcs.push_back(kSsrc2);
+ config.rtp.rtx.ssrcs.push_back(kRtxSsrc1);
+ config.rtp.rtx.ssrcs.push_back(kRtxSsrc2);
+ return config;
+ }
+
+ void OnSendPacket(uint16_t id, uint32_t ssrc) {
+ OnSendPacket(id, ssrc, clock_.TimeInMilliseconds());
+ }
+
+ void OnSendPacket(uint16_t id, uint32_t ssrc, int64_t capture_ms) {
+ SendPacketObserver* observer = stats_.get();
+ observer->OnSendPacket(id, capture_ms, ssrc);
+ }
+
+ bool OnSentPacket(uint16_t id) {
+ return stats_->OnSentPacket(id, clock_.TimeInMilliseconds());
+ }
+
+ SimulatedClock clock_;
+ VideoSendStream::Config config_;
+ std::unique_ptr<SendDelayStats> stats_;
+};
+
+TEST_F(SendDelayStatsTest, SentPacketFound) {
+ EXPECT_FALSE(OnSentPacket(kPacketId));
+ OnSendPacket(kPacketId, kSsrc1);
+ EXPECT_TRUE(OnSentPacket(kPacketId)); // Packet found.
+ EXPECT_FALSE(OnSentPacket(kPacketId)); // Packet removed when found.
+}
+
+TEST_F(SendDelayStatsTest, SentPacketNotFoundForNonRegisteredSsrc) {
+ OnSendPacket(kPacketId, kSsrc1);
+ EXPECT_TRUE(OnSentPacket(kPacketId));
+ OnSendPacket(kPacketId + 1, kSsrc2);
+ EXPECT_TRUE(OnSentPacket(kPacketId + 1));
+ OnSendPacket(kPacketId + 2, kRtxSsrc1); // RTX SSRC not registered.
+ EXPECT_FALSE(OnSentPacket(kPacketId + 2));
+}
+
+TEST_F(SendDelayStatsTest, SentPacketFoundWithMaxSendDelay) {
+ OnSendPacket(kPacketId, kSsrc1);
+ clock_.AdvanceTimeMilliseconds(kMaxPacketDelayMs - 1);
+ OnSendPacket(kPacketId + 1, kSsrc1); // kPacketId -> not old/removed.
+ EXPECT_TRUE(OnSentPacket(kPacketId)); // Packet found.
+ EXPECT_TRUE(OnSentPacket(kPacketId + 1)); // Packet found.
+}
+
+TEST_F(SendDelayStatsTest, OldPacketsRemoved) {
+ const int64_t kCaptureTimeMs = clock_.TimeInMilliseconds();
+ OnSendPacket(0xffffu, kSsrc1, kCaptureTimeMs);
+ OnSendPacket(0u, kSsrc1, kCaptureTimeMs);
+ OnSendPacket(1u, kSsrc1, kCaptureTimeMs + 1);
+ clock_.AdvanceTimeMilliseconds(kMaxPacketDelayMs); // 0xffff, 0 -> old.
+ OnSendPacket(2u, kSsrc1, kCaptureTimeMs + 2);
+
+ EXPECT_FALSE(OnSentPacket(0xffffu)); // Old removed.
+ EXPECT_FALSE(OnSentPacket(0u)); // Old removed.
+ EXPECT_TRUE(OnSentPacket(1u));
+ EXPECT_TRUE(OnSentPacket(2u));
+}
+
+TEST_F(SendDelayStatsTest, HistogramsAreUpdated) {
+ metrics::Reset();
+ const int64_t kDelayMs1 = 5;
+ const int64_t kDelayMs2 = 15;
+ const int kNumSamples = kMinRequiredPeriodicSamples * kProcessIntervalMs /
+ (kDelayMs1 + kDelayMs2) +
+ 1;
+
+ uint16_t id = 0;
+ for (int i = 0; i < kNumSamples; ++i) {
+ OnSendPacket(++id, kSsrc1);
+ clock_.AdvanceTimeMilliseconds(kDelayMs1);
+ EXPECT_TRUE(OnSentPacket(id));
+ OnSendPacket(++id, kSsrc2);
+ clock_.AdvanceTimeMilliseconds(kDelayMs2);
+ EXPECT_TRUE(OnSentPacket(id));
+ }
+ stats_.reset();
+ EXPECT_METRIC_EQ(2, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs1));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs2));
+}
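+
+// Editorial note on the arithmetic above: each iteration advances the clock
+// by kDelayMs1 + kDelayMs2 = 20 ms, so kNumSamples = 5 * 2000 / 20 + 1 = 501
+// iterations span just over five 2000 ms processing intervals, enough to
+// satisfy kMinRequiredPeriodicSamples for both streams before the histograms
+// are flushed at destruction.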
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/send_statistics_proxy.cc b/third_party/libwebrtc/video/send_statistics_proxy.cc
new file mode 100644
index 0000000000..b6c2d60a73
--- /dev/null
+++ b/third_party/libwebrtc/video/send_statistics_proxy.cc
@@ -0,0 +1,1512 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_statistics_proxy.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdio>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_codec_type.h"
+#include "api/video_codecs/video_codec.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/mod_ops.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+const float kEncodeTimeWeightFactor = 0.5f;
+const size_t kMaxEncodedFrameMapSize = 150;
+const int64_t kMaxEncodedFrameWindowMs = 800;
+const uint32_t kMaxEncodedFrameTimestampDiff = 900000; // 10 sec.
+const int64_t kBucketSizeMs = 100;
+const size_t kBucketCount = 10;
+
+const char kVp8ForcedFallbackEncoderFieldTrial[] =
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2";
+const char kVp8SwCodecName[] = "libvpx";
+
+// Used by histograms. Values of entries should not be changed.
+enum HistogramCodecType {
+ kVideoUnknown = 0,
+ kVideoVp8 = 1,
+ kVideoVp9 = 2,
+ kVideoH264 = 3,
+ kVideoAv1 = 4,
+ kVideoMax = 64,
+};
+
+const char* kRealtimePrefix = "WebRTC.Video.";
+const char* kScreenPrefix = "WebRTC.Video.Screenshare.";
+
+const char* GetUmaPrefix(VideoEncoderConfig::ContentType content_type) {
+ switch (content_type) {
+ case VideoEncoderConfig::ContentType::kRealtimeVideo:
+ return kRealtimePrefix;
+ case VideoEncoderConfig::ContentType::kScreen:
+ return kScreenPrefix;
+ }
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+}
+
+HistogramCodecType PayloadNameToHistogramCodecType(
+ const std::string& payload_name) {
+ VideoCodecType codecType = PayloadStringToCodecType(payload_name);
+ switch (codecType) {
+ case kVideoCodecVP8:
+ return kVideoVp8;
+ case kVideoCodecVP9:
+ return kVideoVp9;
+ case kVideoCodecH264:
+ return kVideoH264;
+ case kVideoCodecAV1:
+ return kVideoAv1;
+ default:
+ return kVideoUnknown;
+ }
+}
+
+void UpdateCodecTypeHistogram(const std::string& payload_name) {
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.Encoder.CodecType",
+ PayloadNameToHistogramCodecType(payload_name),
+ kVideoMax);
+}
+
+bool IsForcedFallbackPossible(const CodecSpecificInfo* codec_info,
+ int simulcast_index) {
+ return codec_info->codecType == kVideoCodecVP8 && simulcast_index == 0 &&
+ (codec_info->codecSpecific.VP8.temporalIdx == 0 ||
+ codec_info->codecSpecific.VP8.temporalIdx == kNoTemporalIdx);
+}
+
+absl::optional<int> GetFallbackMaxPixels(const std::string& group) {
+ if (group.empty())
+ return absl::nullopt;
+
+ int min_pixels;
+ int max_pixels;
+ int min_bps;
+ if (sscanf(group.c_str(), "-%d,%d,%d", &min_pixels, &max_pixels, &min_bps) !=
+ 3) {
+ return absl::optional<int>();
+ }
+
+ if (min_pixels <= 0 || max_pixels <= 0 || max_pixels < min_pixels)
+ return absl::optional<int>();
+
+ return absl::optional<int>(max_pixels);
+}
+
+absl::optional<int> GetFallbackMaxPixelsIfFieldTrialEnabled(
+ const webrtc::FieldTrialsView& field_trials) {
+ std::string group = field_trials.Lookup(kVp8ForcedFallbackEncoderFieldTrial);
+ return (absl::StartsWith(group, "Enabled"))
+ ? GetFallbackMaxPixels(group.substr(7))
+ : absl::optional<int>();
+}
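+
+// Editorial example with hypothetical values: a field trial string
+// "Enabled-100,16200,25000" is stripped to "-100,16200,25000" above, which
+// GetFallbackMaxPixels() parses as min_pixels = 100, max_pixels = 16200 and
+// min_bps = 25000, returning 16200.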
+
+absl::optional<int> GetFallbackMaxPixelsIfFieldTrialDisabled(
+ const webrtc::FieldTrialsView& field_trials) {
+ std::string group = field_trials.Lookup(kVp8ForcedFallbackEncoderFieldTrial);
+ return (absl::StartsWith(group, "Disabled"))
+ ? GetFallbackMaxPixels(group.substr(8))
+ : absl::optional<int>();
+}
+} // namespace
+
+const int SendStatisticsProxy::kStatsTimeoutMs = 5000;
+
+SendStatisticsProxy::SendStatisticsProxy(
+ Clock* clock,
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ payload_name_(config.rtp.payload_name),
+ rtp_config_(config.rtp),
+ fallback_max_pixels_(
+ GetFallbackMaxPixelsIfFieldTrialEnabled(field_trials)),
+ fallback_max_pixels_disabled_(
+ GetFallbackMaxPixelsIfFieldTrialDisabled(field_trials)),
+ content_type_(content_type),
+ start_ms_(clock->TimeInMilliseconds()),
+      encode_time_(kEncodeTimeWeightFactor),
+ quality_limitation_reason_tracker_(clock_),
+ media_byte_rate_tracker_(kBucketSizeMs, kBucketCount),
+ encoded_frame_rate_tracker_(kBucketSizeMs, kBucketCount),
+ last_num_spatial_layers_(0),
+ last_num_simulcast_streams_(0),
+ last_spatial_layer_use_{},
+ bw_limited_layers_(false),
+ internal_encoder_scaler_(false),
+ uma_container_(
+ new UmaSamplesContainer(GetUmaPrefix(content_type_), stats_, clock)) {
+}
+
+SendStatisticsProxy::~SendStatisticsProxy() {
+ MutexLock lock(&mutex_);
+ uma_container_->UpdateHistograms(rtp_config_, stats_);
+
+ int64_t elapsed_sec = (clock_->TimeInMilliseconds() - start_ms_) / 1000;
+ RTC_HISTOGRAM_COUNTS_100000("WebRTC.Video.SendStreamLifetimeInSeconds",
+ elapsed_sec);
+
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds)
+ UpdateCodecTypeHistogram(payload_name_);
+}
+
+SendStatisticsProxy::FallbackEncoderInfo::FallbackEncoderInfo() = default;
+
+SendStatisticsProxy::UmaSamplesContainer::UmaSamplesContainer(
+ const char* prefix,
+ const VideoSendStream::Stats& stats,
+ Clock* const clock)
+ : uma_prefix_(prefix),
+ clock_(clock),
+ input_frame_rate_tracker_(100, 10u),
+ input_fps_counter_(clock, nullptr, true),
+ sent_fps_counter_(clock, nullptr, true),
+ total_byte_counter_(clock, nullptr, true),
+ media_byte_counter_(clock, nullptr, true),
+ rtx_byte_counter_(clock, nullptr, true),
+ padding_byte_counter_(clock, nullptr, true),
+ retransmit_byte_counter_(clock, nullptr, true),
+ fec_byte_counter_(clock, nullptr, true),
+ first_rtcp_stats_time_ms_(-1),
+ first_rtp_stats_time_ms_(-1),
+ start_stats_(stats),
+ num_streams_(0),
+ num_pixels_highest_stream_(0) {
+ InitializeBitrateCounters(stats);
+ static_assert(
+ kMaxEncodedFrameTimestampDiff < std::numeric_limits<uint32_t>::max() / 2,
+ "has to be smaller than half range");
+}
+
+SendStatisticsProxy::UmaSamplesContainer::~UmaSamplesContainer() {}
+
+void SendStatisticsProxy::UmaSamplesContainer::InitializeBitrateCounters(
+ const VideoSendStream::Stats& stats) {
+ for (const auto& it : stats.substreams) {
+ uint32_t ssrc = it.first;
+ total_byte_counter_.SetLast(it.second.rtp_stats.transmitted.TotalBytes(),
+ ssrc);
+ padding_byte_counter_.SetLast(it.second.rtp_stats.transmitted.padding_bytes,
+ ssrc);
+ retransmit_byte_counter_.SetLast(
+ it.second.rtp_stats.retransmitted.TotalBytes(), ssrc);
+ fec_byte_counter_.SetLast(it.second.rtp_stats.fec.TotalBytes(), ssrc);
+ switch (it.second.type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ media_byte_counter_.SetLast(it.second.rtp_stats.MediaPayloadBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ rtx_byte_counter_.SetLast(it.second.rtp_stats.transmitted.TotalBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ break;
+ }
+ }
+}
+
+void SendStatisticsProxy::UmaSamplesContainer::RemoveOld(int64_t now_ms) {
+ while (!encoded_frames_.empty()) {
+ auto it = encoded_frames_.begin();
+ if (now_ms - it->second.send_ms < kMaxEncodedFrameWindowMs)
+ break;
+
+ // Use max per timestamp.
+ sent_width_counter_.Add(it->second.max_width);
+ sent_height_counter_.Add(it->second.max_height);
+
+ // Check number of encoded streams per timestamp.
+ if (num_streams_ > static_cast<size_t>(it->second.max_simulcast_idx)) {
+ if (num_streams_ > 1) {
+ int disabled_streams =
+ static_cast<int>(num_streams_ - 1 - it->second.max_simulcast_idx);
+ // Can be limited in resolution or framerate.
+ uint32_t pixels = it->second.max_width * it->second.max_height;
+ bool bw_limited_resolution =
+ disabled_streams > 0 && pixels < num_pixels_highest_stream_;
+ bw_limited_frame_counter_.Add(bw_limited_resolution);
+ if (bw_limited_resolution) {
+ bw_resolutions_disabled_counter_.Add(disabled_streams);
+ }
+ }
+ }
+ encoded_frames_.erase(it);
+ }
+}
+
+bool SendStatisticsProxy::UmaSamplesContainer::InsertEncodedFrame(
+ const EncodedImage& encoded_frame,
+ int simulcast_idx) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ RemoveOld(now_ms);
+ if (encoded_frames_.size() > kMaxEncodedFrameMapSize) {
+ encoded_frames_.clear();
+ }
+
+ // Check for jump in timestamp.
+ if (!encoded_frames_.empty()) {
+ uint32_t oldest_timestamp = encoded_frames_.begin()->first;
+ if (ForwardDiff(oldest_timestamp, encoded_frame.Timestamp()) >
+ kMaxEncodedFrameTimestampDiff) {
+      // Gap detected; clear stored frames so that the newest timestamp stays
+      // close enough to the oldest for old and new frames to be
+      // distinguishable.
+ encoded_frames_.clear();
+ }
+ }
+
+ auto it = encoded_frames_.find(encoded_frame.Timestamp());
+ if (it == encoded_frames_.end()) {
+ // First frame with this timestamp.
+ encoded_frames_.insert(
+ std::make_pair(encoded_frame.Timestamp(),
+ Frame(now_ms, encoded_frame._encodedWidth,
+ encoded_frame._encodedHeight, simulcast_idx)));
+ sent_fps_counter_.Add(1);
+ return true;
+ }
+
+ it->second.max_width =
+ std::max(it->second.max_width, encoded_frame._encodedWidth);
+ it->second.max_height =
+ std::max(it->second.max_height, encoded_frame._encodedHeight);
+ it->second.max_simulcast_idx =
+ std::max(it->second.max_simulcast_idx, simulcast_idx);
+ return false;
+}
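+
+// Editorial note: with the 90 kHz RTP video clock,
+// kMaxEncodedFrameTimestampDiff (900000 ticks) corresponds to 10 seconds; a
+// forward jump larger than that clears the map so that ForwardDiff()
+// ordering stays unambiguous.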
+
+void SendStatisticsProxy::UmaSamplesContainer::UpdateHistograms(
+ const RtpConfig& rtp_config,
+ const VideoSendStream::Stats& current_stats) {
+ RTC_DCHECK(uma_prefix_ == kRealtimePrefix || uma_prefix_ == kScreenPrefix);
+ const int kIndex = uma_prefix_ == kScreenPrefix ? 1 : 0;
+ const int kMinRequiredPeriodicSamples = 6;
+ char log_stream_buf[8 * 1024];
+ rtc::SimpleStringBuilder log_stream(log_stream_buf);
+ int in_width = input_width_counter_.Avg(kMinRequiredMetricsSamples);
+ int in_height = input_height_counter_.Avg(kMinRequiredMetricsSamples);
+ if (in_width != -1) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "InputWidthInPixels",
+ in_width);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "InputHeightInPixels",
+ in_height);
+ log_stream << uma_prefix_ << "InputWidthInPixels " << in_width << "\n"
+ << uma_prefix_ << "InputHeightInPixels " << in_height << "\n";
+ }
+ AggregatedStats in_fps = input_fps_counter_.GetStats();
+ if (in_fps.num_samples >= kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "InputFramesPerSecond",
+ in_fps.average);
+ log_stream << uma_prefix_ << "InputFramesPerSecond " << in_fps.ToString()
+ << "\n";
+ }
+
+ int sent_width = sent_width_counter_.Avg(kMinRequiredMetricsSamples);
+ int sent_height = sent_height_counter_.Avg(kMinRequiredMetricsSamples);
+ if (sent_width != -1) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "SentWidthInPixels",
+ sent_width);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "SentHeightInPixels",
+ sent_height);
+ log_stream << uma_prefix_ << "SentWidthInPixels " << sent_width << "\n"
+ << uma_prefix_ << "SentHeightInPixels " << sent_height << "\n";
+ }
+ AggregatedStats sent_fps = sent_fps_counter_.GetStats();
+ if (sent_fps.num_samples >= kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "SentFramesPerSecond",
+ sent_fps.average);
+ log_stream << uma_prefix_ << "SentFramesPerSecond " << sent_fps.ToString()
+ << "\n";
+ }
+
+ if (in_fps.num_samples > kMinRequiredPeriodicSamples &&
+ sent_fps.num_samples >= kMinRequiredPeriodicSamples) {
+ int in_fps_avg = in_fps.average;
+ if (in_fps_avg > 0) {
+ int sent_fps_avg = sent_fps.average;
+ int sent_to_in_fps_ratio_percent =
+ (100 * sent_fps_avg + in_fps_avg / 2) / in_fps_avg;
+      // If the reported period is small, sent_fps may briefly exceed
+      // input_fps on average. Treat this as a 100% sent-to-input ratio.
+ if (sent_to_in_fps_ratio_percent > 100)
+ sent_to_in_fps_ratio_percent = 100;
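+      // Worked example (editorial): sent_fps_avg = 24 and in_fps_avg = 30
+      // give (100 * 24 + 15) / 30 = 80, i.e. 80%; the "+ in_fps_avg / 2"
+      // term rounds to the nearest percent.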
+ RTC_HISTOGRAMS_PERCENTAGE(kIndex,
+ uma_prefix_ + "SentToInputFpsRatioPercent",
+ sent_to_in_fps_ratio_percent);
+ log_stream << uma_prefix_ << "SentToInputFpsRatioPercent "
+ << sent_to_in_fps_ratio_percent << "\n";
+ }
+ }
+
+ int encode_ms = encode_time_counter_.Avg(kMinRequiredMetricsSamples);
+ if (encode_ms != -1) {
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "EncodeTimeInMs",
+ encode_ms);
+ log_stream << uma_prefix_ << "EncodeTimeInMs " << encode_ms << "\n";
+ }
+ int key_frames_permille =
+ key_frame_counter_.Permille(kMinRequiredMetricsSamples);
+ if (key_frames_permille != -1) {
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "KeyFramesSentInPermille",
+ key_frames_permille);
+ log_stream << uma_prefix_ << "KeyFramesSentInPermille "
+ << key_frames_permille << "\n";
+ }
+ int quality_limited =
+ quality_limited_frame_counter_.Percent(kMinRequiredMetricsSamples);
+ if (quality_limited != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(kIndex,
+ uma_prefix_ + "QualityLimitedResolutionInPercent",
+ quality_limited);
+ log_stream << uma_prefix_ << "QualityLimitedResolutionInPercent "
+ << quality_limited << "\n";
+ }
+ int downscales = quality_downscales_counter_.Avg(kMinRequiredMetricsSamples);
+ if (downscales != -1) {
+ RTC_HISTOGRAMS_ENUMERATION(
+ kIndex, uma_prefix_ + "QualityLimitedResolutionDownscales", downscales,
+ 20);
+ }
+ int cpu_limited =
+ cpu_limited_frame_counter_.Percent(kMinRequiredMetricsSamples);
+ if (cpu_limited != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "CpuLimitedResolutionInPercent", cpu_limited);
+ }
+ int bw_limited =
+ bw_limited_frame_counter_.Percent(kMinRequiredMetricsSamples);
+ if (bw_limited != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "BandwidthLimitedResolutionInPercent",
+ bw_limited);
+ }
+ int num_disabled =
+ bw_resolutions_disabled_counter_.Avg(kMinRequiredMetricsSamples);
+ if (num_disabled != -1) {
+ RTC_HISTOGRAMS_ENUMERATION(
+ kIndex, uma_prefix_ + "BandwidthLimitedResolutionsDisabled",
+ num_disabled, 10);
+ }
+ int delay_ms = delay_counter_.Avg(kMinRequiredMetricsSamples);
+ if (delay_ms != -1)
+ RTC_HISTOGRAMS_COUNTS_100000(kIndex, uma_prefix_ + "SendSideDelayInMs",
+ delay_ms);
+
+ int max_delay_ms = max_delay_counter_.Avg(kMinRequiredMetricsSamples);
+ if (max_delay_ms != -1) {
+ RTC_HISTOGRAMS_COUNTS_100000(kIndex, uma_prefix_ + "SendSideDelayMaxInMs",
+ max_delay_ms);
+ }
+
+ for (const auto& it : qp_counters_) {
+ int qp_vp8 = it.second.vp8.Avg(kMinRequiredMetricsSamples);
+ if (qp_vp8 != -1) {
+ int spatial_idx = it.first;
+ if (spatial_idx == -1) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8",
+ qp_vp8);
+ } else if (spatial_idx == 0) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8.S0",
+ qp_vp8);
+ } else if (spatial_idx == 1) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8.S1",
+ qp_vp8);
+ } else if (spatial_idx == 2) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8.S2",
+ qp_vp8);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "QP stats not recorded for VP8 spatial idx " << spatial_idx;
+ }
+ }
+ int qp_vp9 = it.second.vp9.Avg(kMinRequiredMetricsSamples);
+ if (qp_vp9 != -1) {
+ int spatial_idx = it.first;
+ if (spatial_idx == -1) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9",
+ qp_vp9);
+ } else if (spatial_idx == 0) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9.S0",
+ qp_vp9);
+ } else if (spatial_idx == 1) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9.S1",
+ qp_vp9);
+ } else if (spatial_idx == 2) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9.S2",
+ qp_vp9);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "QP stats not recorded for VP9 spatial layer " << spatial_idx;
+ }
+ }
+ int qp_h264 = it.second.h264.Avg(kMinRequiredMetricsSamples);
+ if (qp_h264 != -1) {
+ int spatial_idx = it.first;
+ if (spatial_idx == -1) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.H264",
+ qp_h264);
+ } else if (spatial_idx == 0) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.H264.S0",
+ qp_h264);
+ } else if (spatial_idx == 1) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.H264.S1",
+ qp_h264);
+ } else if (spatial_idx == 2) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.H264.S2",
+ qp_h264);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "QP stats not recorded for H264 spatial idx " << spatial_idx;
+ }
+ }
+ }
+
+ if (first_rtp_stats_time_ms_ != -1) {
+ quality_adapt_timer_.Stop(clock_->TimeInMilliseconds());
+ int64_t elapsed_sec = quality_adapt_timer_.total_ms / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int quality_changes = current_stats.number_of_quality_adapt_changes -
+ start_stats_.number_of_quality_adapt_changes;
+      // Only base stats on changes during a call; discard initial changes.
+ int initial_changes =
+ initial_quality_changes_.down + initial_quality_changes_.up;
+ if (initial_changes <= quality_changes)
+ quality_changes -= initial_changes;
+ RTC_HISTOGRAMS_COUNTS_100(kIndex,
+ uma_prefix_ + "AdaptChangesPerMinute.Quality",
+ quality_changes * 60 / elapsed_sec);
+ }
+ cpu_adapt_timer_.Stop(clock_->TimeInMilliseconds());
+ elapsed_sec = cpu_adapt_timer_.total_ms / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int cpu_changes = current_stats.number_of_cpu_adapt_changes -
+ start_stats_.number_of_cpu_adapt_changes;
+ RTC_HISTOGRAMS_COUNTS_100(kIndex,
+ uma_prefix_ + "AdaptChangesPerMinute.Cpu",
+ cpu_changes * 60 / elapsed_sec);
+ }
+ }
+
+ if (first_rtcp_stats_time_ms_ != -1) {
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - first_rtcp_stats_time_ms_) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int fraction_lost = report_block_stats_.FractionLostInPercent();
+ if (fraction_lost != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "SentPacketsLostInPercent", fraction_lost);
+ log_stream << uma_prefix_ << "SentPacketsLostInPercent "
+ << fraction_lost << "\n";
+ }
+
+ // The RTCP packet type counters, delivered via the
+ // RtcpPacketTypeCounterObserver interface, are aggregates over the entire
+ // life of the send stream and are not reset when switching content type.
+ // For the purpose of these statistics though, we want new counts when
+ // switching since we switch histogram name. On every reset of the
+ // UmaSamplesContainer, we save the initial state of the counters, so that
+ // we can calculate the delta here and aggregate over all ssrcs.
+ RtcpPacketTypeCounter counters;
+ for (uint32_t ssrc : rtp_config.ssrcs) {
+ auto kv = current_stats.substreams.find(ssrc);
+ if (kv == current_stats.substreams.end())
+ continue;
+
+ RtcpPacketTypeCounter stream_counters =
+ kv->second.rtcp_packet_type_counts;
+ kv = start_stats_.substreams.find(ssrc);
+ if (kv != start_stats_.substreams.end())
+ stream_counters.Subtract(kv->second.rtcp_packet_type_counts);
+
+ counters.Add(stream_counters);
+ }
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "NackPacketsReceivedPerMinute",
+ counters.nack_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "FirPacketsReceivedPerMinute",
+ counters.fir_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "PliPacketsReceivedPerMinute",
+ counters.pli_packets * 60 / elapsed_sec);
+ if (counters.nack_requests > 0) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "UniqueNackRequestsReceivedInPercent",
+ counters.UniqueNackRequestsInPercent());
+ }
+ }
+ }
+
+ if (first_rtp_stats_time_ms_ != -1) {
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - first_rtp_stats_time_ms_) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "NumberOfPauseEvents",
+ target_rate_updates_.pause_resume_events);
+ log_stream << uma_prefix_ << "NumberOfPauseEvents "
+ << target_rate_updates_.pause_resume_events << "\n";
+
+ int paused_time_percent =
+ paused_time_counter_.Percent(metrics::kMinRunTimeInSeconds * 1000);
+ if (paused_time_percent != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(kIndex, uma_prefix_ + "PausedTimeInPercent",
+ paused_time_percent);
+ log_stream << uma_prefix_ << "PausedTimeInPercent "
+ << paused_time_percent << "\n";
+ }
+ }
+ }
+
+ if (fallback_info_.is_possible) {
+ // Double interval since there is some time before fallback may occur.
+ const int kMinRunTimeMs = 2 * metrics::kMinRunTimeInSeconds * 1000;
+ int64_t elapsed_ms = fallback_info_.elapsed_ms;
+ int fallback_time_percent = fallback_active_counter_.Percent(kMinRunTimeMs);
+ if (fallback_time_percent != -1 && elapsed_ms >= kMinRunTimeMs) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "Encoder.ForcedSwFallbackTimeInPercent.Vp8",
+ fallback_time_percent);
+ RTC_HISTOGRAMS_COUNTS_100(
+ kIndex, uma_prefix_ + "Encoder.ForcedSwFallbackChangesPerMinute.Vp8",
+ fallback_info_.on_off_events * 60 / (elapsed_ms / 1000));
+ }
+ }
+
+ AggregatedStats total_bytes_per_sec = total_byte_counter_.GetStats();
+ if (total_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "BitrateSentInKbps",
+ total_bytes_per_sec.average * 8 / 1000);
+ log_stream << uma_prefix_ << "BitrateSentInBps "
+ << total_bytes_per_sec.ToStringWithMultiplier(8) << "\n";
+ }
+ AggregatedStats media_bytes_per_sec = media_byte_counter_.GetStats();
+ if (media_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "MediaBitrateSentInKbps",
+ media_bytes_per_sec.average * 8 / 1000);
+ log_stream << uma_prefix_ << "MediaBitrateSentInBps "
+ << media_bytes_per_sec.ToStringWithMultiplier(8) << "\n";
+ }
+ AggregatedStats padding_bytes_per_sec = padding_byte_counter_.GetStats();
+ if (padding_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "PaddingBitrateSentInKbps",
+ padding_bytes_per_sec.average * 8 / 1000);
+ log_stream << uma_prefix_ << "PaddingBitrateSentInBps "
+ << padding_bytes_per_sec.ToStringWithMultiplier(8) << "\n";
+ }
+ AggregatedStats retransmit_bytes_per_sec =
+ retransmit_byte_counter_.GetStats();
+ if (retransmit_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "RetransmittedBitrateSentInKbps",
+ retransmit_bytes_per_sec.average * 8 / 1000);
+ log_stream << uma_prefix_ << "RetransmittedBitrateSentInBps "
+ << retransmit_bytes_per_sec.ToStringWithMultiplier(8) << "\n";
+ }
+ if (!rtp_config.rtx.ssrcs.empty()) {
+ AggregatedStats rtx_bytes_per_sec = rtx_byte_counter_.GetStats();
+ int rtx_bytes_per_sec_avg = -1;
+ if (rtx_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ rtx_bytes_per_sec_avg = rtx_bytes_per_sec.average;
+ log_stream << uma_prefix_ << "RtxBitrateSentInBps "
+ << rtx_bytes_per_sec.ToStringWithMultiplier(8) << "\n";
+ } else if (total_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ rtx_bytes_per_sec_avg = 0; // RTX enabled but no RTX data sent, record 0.
+ }
+ if (rtx_bytes_per_sec_avg != -1) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "RtxBitrateSentInKbps",
+ rtx_bytes_per_sec_avg * 8 / 1000);
+ }
+ }
+ if (rtp_config.flexfec.payload_type != -1 ||
+ rtp_config.ulpfec.red_payload_type != -1) {
+ AggregatedStats fec_bytes_per_sec = fec_byte_counter_.GetStats();
+ if (fec_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "FecBitrateSentInKbps",
+ fec_bytes_per_sec.average * 8 / 1000);
+ log_stream << uma_prefix_ << "FecBitrateSentInBps "
+ << fec_bytes_per_sec.ToStringWithMultiplier(8) << "\n";
+ }
+ }
+ log_stream << "Frames encoded " << current_stats.frames_encoded << "\n"
+ << uma_prefix_ << "DroppedFrames.Capturer "
+ << current_stats.frames_dropped_by_capturer << "\n";
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Capturer",
+ current_stats.frames_dropped_by_capturer);
+ log_stream << uma_prefix_ << "DroppedFrames.EncoderQueue "
+ << current_stats.frames_dropped_by_encoder_queue << "\n";
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.EncoderQueue",
+ current_stats.frames_dropped_by_encoder_queue);
+ log_stream << uma_prefix_ << "DroppedFrames.Encoder "
+ << current_stats.frames_dropped_by_encoder << "\n";
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Encoder",
+ current_stats.frames_dropped_by_encoder);
+ log_stream << uma_prefix_ << "DroppedFrames.Ratelimiter "
+ << current_stats.frames_dropped_by_rate_limiter << "\n";
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Ratelimiter",
+ current_stats.frames_dropped_by_rate_limiter);
+ log_stream << uma_prefix_ << "DroppedFrames.CongestionWindow "
+ << current_stats.frames_dropped_by_congestion_window;
+
+ RTC_LOG(LS_INFO) << log_stream.str();
+}
+
+void SendStatisticsProxy::OnEncoderReconfigured(
+ const VideoEncoderConfig& config,
+ const std::vector<VideoStream>& streams) {
+ // Called on VideoStreamEncoder's encoder_queue_.
+ MutexLock lock(&mutex_);
+
+ if (content_type_ != config.content_type) {
+ uma_container_->UpdateHistograms(rtp_config_, stats_);
+ uma_container_.reset(new UmaSamplesContainer(
+ GetUmaPrefix(config.content_type), stats_, clock_));
+ content_type_ = config.content_type;
+ }
+ uma_container_->encoded_frames_.clear();
+ uma_container_->num_streams_ = streams.size();
+ uma_container_->num_pixels_highest_stream_ =
+ streams.empty() ? 0 : (streams.back().width * streams.back().height);
+}
+
+void SendStatisticsProxy::OnEncodedFrameTimeMeasured(int encode_time_ms,
+ int encode_usage_percent) {
+ RTC_DCHECK_GE(encode_time_ms, 0);
+ MutexLock lock(&mutex_);
+ uma_container_->encode_time_counter_.Add(encode_time_ms);
+ encode_time_.Apply(1.0f, encode_time_ms);
+ stats_.avg_encode_time_ms = std::round(encode_time_.filtered());
+ stats_.total_encode_time_ms += encode_time_ms;
+ stats_.encode_usage_percent = encode_usage_percent;
+}
+
+void SendStatisticsProxy::OnSuspendChange(bool is_suspended) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ MutexLock lock(&mutex_);
+ stats_.suspended = is_suspended;
+ if (is_suspended) {
+ // Pause framerate (add min pause time since there may be frames/packets
+ // that are not yet sent).
+ const int64_t kMinMs = 500;
+ uma_container_->input_fps_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->sent_fps_counter_.ProcessAndPauseForDuration(kMinMs);
+ // Pause bitrate stats.
+ uma_container_->total_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->media_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->rtx_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->padding_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->retransmit_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->fec_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ // Stop adaptation stats.
+ uma_container_->cpu_adapt_timer_.Stop(now_ms);
+ uma_container_->quality_adapt_timer_.Stop(now_ms);
+ } else {
+ // Start adaptation stats if scaling is enabled.
+ if (adaptation_limitations_.MaskedCpuCounts()
+ .resolution_adaptations.has_value())
+ uma_container_->cpu_adapt_timer_.Start(now_ms);
+ if (adaptation_limitations_.MaskedQualityCounts()
+ .resolution_adaptations.has_value())
+ uma_container_->quality_adapt_timer_.Start(now_ms);
+ // Stop pause explicitly for stats that may be zero/not updated for some
+ // time.
+ uma_container_->rtx_byte_counter_.ProcessAndStopPause();
+ uma_container_->padding_byte_counter_.ProcessAndStopPause();
+ uma_container_->retransmit_byte_counter_.ProcessAndStopPause();
+ uma_container_->fec_byte_counter_.ProcessAndStopPause();
+ }
+}
+
+VideoSendStream::Stats SendStatisticsProxy::GetStats() {
+ MutexLock lock(&mutex_);
+ PurgeOldStats();
+ stats_.input_frame_rate =
+ uma_container_->input_frame_rate_tracker_.ComputeRate();
+ stats_.frames =
+ uma_container_->input_frame_rate_tracker_.TotalSampleCount();
+ stats_.content_type =
+ content_type_ == VideoEncoderConfig::ContentType::kRealtimeVideo
+ ? VideoContentType::UNSPECIFIED
+ : VideoContentType::SCREENSHARE;
+ stats_.encode_frame_rate = round(encoded_frame_rate_tracker_.ComputeRate());
+ stats_.media_bitrate_bps = media_byte_rate_tracker_.ComputeRate() * 8;
+ stats_.quality_limitation_durations_ms =
+ quality_limitation_reason_tracker_.DurationsMs();
+
+ for (auto& substream : stats_.substreams) {
+ uint32_t ssrc = substream.first;
+ if (encoded_frame_rate_trackers_.count(ssrc) > 0) {
+ substream.second.encode_frame_rate =
+ encoded_frame_rate_trackers_[ssrc]->ComputeRate();
+ }
+ }
+ return stats_;
+}
+
+void SendStatisticsProxy::PurgeOldStats() {
+ int64_t old_stats_ms = clock_->TimeInMilliseconds() - kStatsTimeoutMs;
+ for (std::map<uint32_t, VideoSendStream::StreamStats>::iterator it =
+ stats_.substreams.begin();
+ it != stats_.substreams.end(); ++it) {
+ uint32_t ssrc = it->first;
+ if (update_times_[ssrc].resolution_update_ms <= old_stats_ms) {
+ it->second.width = 0;
+ it->second.height = 0;
+ }
+ }
+}
+
+VideoSendStream::StreamStats* SendStatisticsProxy::GetStatsEntry(
+ uint32_t ssrc) {
+ std::map<uint32_t, VideoSendStream::StreamStats>::iterator it =
+ stats_.substreams.find(ssrc);
+ if (it != stats_.substreams.end())
+ return &it->second;
+
+ bool is_media = rtp_config_.IsMediaSsrc(ssrc);
+ bool is_flexfec = rtp_config_.flexfec.payload_type != -1 &&
+ ssrc == rtp_config_.flexfec.ssrc;
+ bool is_rtx = rtp_config_.IsRtxSsrc(ssrc);
+ if (!is_media && !is_flexfec && !is_rtx)
+ return nullptr;
+
+ // Insert new entry and return ptr.
+ VideoSendStream::StreamStats* entry = &stats_.substreams[ssrc];
+ if (is_media) {
+ entry->type = VideoSendStream::StreamStats::StreamType::kMedia;
+ } else if (is_rtx) {
+ entry->type = VideoSendStream::StreamStats::StreamType::kRtx;
+ } else if (is_flexfec) {
+ entry->type = VideoSendStream::StreamStats::StreamType::kFlexfec;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ switch (entry->type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ break;
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ entry->referenced_media_ssrc =
+ rtp_config_.GetMediaSsrcAssociatedWithRtxSsrc(ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ entry->referenced_media_ssrc =
+ rtp_config_.GetMediaSsrcAssociatedWithFlexfecSsrc(ssrc);
+ break;
+ }
+
+ return entry;
+}
+
+void SendStatisticsProxy::OnInactiveSsrc(uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->total_bitrate_bps = 0;
+ stats->retransmit_bitrate_bps = 0;
+ stats->height = 0;
+ stats->width = 0;
+}
+
+void SendStatisticsProxy::OnSetEncoderTargetRate(uint32_t bitrate_bps) {
+ MutexLock lock(&mutex_);
+ if (uma_container_->target_rate_updates_.last_ms == -1 && bitrate_bps == 0)
+ return; // Start on first non-zero bitrate, may initially be zero.
+
+ int64_t now = clock_->TimeInMilliseconds();
+ if (uma_container_->target_rate_updates_.last_ms != -1) {
+ bool was_paused = stats_.target_media_bitrate_bps == 0;
+ int64_t diff_ms = now - uma_container_->target_rate_updates_.last_ms;
+ uma_container_->paused_time_counter_.Add(was_paused, diff_ms);
+
+    // Count the previous transition so that the final update, made when the
+    // stream is stopped and video is disabled, is not included.
+ if (uma_container_->target_rate_updates_.last_paused_or_resumed)
+ ++uma_container_->target_rate_updates_.pause_resume_events;
+
+ // Check if video is paused/resumed.
+ uma_container_->target_rate_updates_.last_paused_or_resumed =
+ (bitrate_bps == 0) != was_paused;
+ }
+ uma_container_->target_rate_updates_.last_ms = now;
+
+ stats_.target_media_bitrate_bps = bitrate_bps;
+}
+
+void SendStatisticsProxy::UpdateEncoderFallbackStats(
+ const CodecSpecificInfo* codec_info,
+ int pixels,
+ int simulcast_index) {
+ UpdateFallbackDisabledStats(codec_info, pixels, simulcast_index);
+
+ if (!fallback_max_pixels_ || !uma_container_->fallback_info_.is_possible) {
+ return;
+ }
+
+ if (!IsForcedFallbackPossible(codec_info, simulcast_index)) {
+ uma_container_->fallback_info_.is_possible = false;
+ return;
+ }
+
+ FallbackEncoderInfo* fallback_info = &uma_container_->fallback_info_;
+
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ bool is_active = fallback_info->is_active;
+ if (encoder_changed_) {
+ // Implementation changed.
+ const bool last_was_vp8_software =
+ encoder_changed_->previous_encoder_implementation == kVp8SwCodecName;
+ is_active = encoder_changed_->new_encoder_implementation == kVp8SwCodecName;
+ encoder_changed_.reset();
+ if (!is_active && !last_was_vp8_software) {
+      // First change, or not a change to/from VP8 software; update stats on
+      // the next call.
+ return;
+ }
+ if (is_active && (pixels > *fallback_max_pixels_)) {
+      // Pixels should not be above `fallback_max_pixels_`. If they are, skip
+      // the update to avoid counting fallbacks caused by failure.
+ fallback_info->is_possible = false;
+ return;
+ }
+ stats_.has_entered_low_resolution = true;
+ ++fallback_info->on_off_events;
+ }
+
+ if (fallback_info->last_update_ms) {
+ int64_t diff_ms = now_ms - *(fallback_info->last_update_ms);
+ // If the time diff since last update is greater than `max_frame_diff_ms`,
+ // video is considered paused/muted and the change is not included.
+ if (diff_ms < fallback_info->max_frame_diff_ms) {
+ uma_container_->fallback_active_counter_.Add(fallback_info->is_active,
+ diff_ms);
+ fallback_info->elapsed_ms += diff_ms;
+ }
+ }
+ fallback_info->is_active = is_active;
+ fallback_info->last_update_ms.emplace(now_ms);
+}
+
+void SendStatisticsProxy::UpdateFallbackDisabledStats(
+ const CodecSpecificInfo* codec_info,
+ int pixels,
+ int simulcast_index) {
+ if (!fallback_max_pixels_disabled_ ||
+ !uma_container_->fallback_info_disabled_.is_possible ||
+ stats_.has_entered_low_resolution) {
+ return;
+ }
+
+ if (!IsForcedFallbackPossible(codec_info, simulcast_index) ||
+ stats_.encoder_implementation_name == kVp8SwCodecName) {
+ uma_container_->fallback_info_disabled_.is_possible = false;
+ return;
+ }
+
+ if (pixels <= *fallback_max_pixels_disabled_ ||
+ uma_container_->fallback_info_disabled_.min_pixel_limit_reached) {
+ stats_.has_entered_low_resolution = true;
+ }
+}
+
+void SendStatisticsProxy::OnMinPixelLimitReached() {
+ MutexLock lock(&mutex_);
+ uma_container_->fallback_info_disabled_.min_pixel_limit_reached = true;
+}
+
+void SendStatisticsProxy::OnSendEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_info) {
+ // Simulcast is used for VP8, H264 and Generic.
+ int simulcast_idx =
+ (codec_info && (codec_info->codecType == kVideoCodecVP8 ||
+ codec_info->codecType == kVideoCodecH264 ||
+ codec_info->codecType == kVideoCodecGeneric))
+ ? encoded_image.SpatialIndex().value_or(0)
+ : 0;
+
+ MutexLock lock(&mutex_);
+ ++stats_.frames_encoded;
+ // The current encode frame rate is based on previously encoded frames.
+ double encode_frame_rate = encoded_frame_rate_tracker_.ComputeRate();
+ // We assume that less than 1 FPS is not a trustworthy estimate - perhaps we
+ // just started encoding for the first time or after a pause. Assuming frame
+ // rate is at least 1 FPS is conservative to avoid too large increments.
+ if (encode_frame_rate < 1.0)
+ encode_frame_rate = 1.0;
+ double target_frame_size_bytes =
+ stats_.target_media_bitrate_bps / (8.0 * encode_frame_rate);
+ // `stats_.target_media_bitrate_bps` is set in
+ // SendStatisticsProxy::OnSetEncoderTargetRate.
+ stats_.total_encoded_bytes_target += round(target_frame_size_bytes);
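+  // Worked example (editorial): with a 300000 bps target and a measured
+  // encode rate of 30 fps, each frame is budgeted
+  // 300000 / (8.0 * 30) = 1250 bytes, so the counter above advances by 1250
+  // per encoded frame.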
+ if (codec_info) {
+ UpdateEncoderFallbackStats(
+ codec_info, encoded_image._encodedWidth * encoded_image._encodedHeight,
+ simulcast_idx);
+ }
+
+ if (static_cast<size_t>(simulcast_idx) >= rtp_config_.ssrcs.size()) {
+ RTC_LOG(LS_ERROR) << "Encoded image outside simulcast range ("
+ << simulcast_idx << " >= " << rtp_config_.ssrcs.size()
+ << ").";
+ return;
+ }
+ uint32_t ssrc = rtp_config_.ssrcs[simulcast_idx];
+
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ if (encoded_frame_rate_trackers_.count(ssrc) == 0) {
+ encoded_frame_rate_trackers_[ssrc] =
+ std::make_unique<rtc::RateTracker>(kBucketSizeMs, kBucketCount);
+ }
+
+ stats->frames_encoded++;
+ stats->total_encode_time_ms += encoded_image.timing_.encode_finish_ms -
+ encoded_image.timing_.encode_start_ms;
+ if (codec_info)
+ stats->scalability_mode = codec_info->scalability_mode;
+ // Report resolution of the top spatial layer.
+ bool is_top_spatial_layer =
+ codec_info == nullptr || codec_info->end_of_picture;
+
+ if (!stats->width || !stats->height || is_top_spatial_layer) {
+ stats->width = encoded_image._encodedWidth;
+ stats->height = encoded_image._encodedHeight;
+ update_times_[ssrc].resolution_update_ms = clock_->TimeInMilliseconds();
+ }
+
+ uma_container_->key_frame_counter_.Add(encoded_image._frameType ==
+ VideoFrameType::kVideoFrameKey);
+
+ if (encoded_image.qp_ != -1) {
+ if (!stats->qp_sum)
+ stats->qp_sum = 0;
+ *stats->qp_sum += encoded_image.qp_;
+
+ if (codec_info) {
+ if (codec_info->codecType == kVideoCodecVP8) {
+ int spatial_idx = (rtp_config_.ssrcs.size() == 1) ? -1 : simulcast_idx;
+ uma_container_->qp_counters_[spatial_idx].vp8.Add(encoded_image.qp_);
+ } else if (codec_info->codecType == kVideoCodecVP9) {
+ int spatial_idx = encoded_image.SpatialIndex().value_or(-1);
+ uma_container_->qp_counters_[spatial_idx].vp9.Add(encoded_image.qp_);
+ } else if (codec_info->codecType == kVideoCodecH264) {
+ int spatial_idx = (rtp_config_.ssrcs.size() == 1) ? -1 : simulcast_idx;
+ uma_container_->qp_counters_[spatial_idx].h264.Add(encoded_image.qp_);
+ }
+ }
+ }
+
+ // If any of the simulcast streams have a huge frame, it should be counted
+ // as a single difficult input frame.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcvideosenderstats-hugeframessent
+ if (encoded_image.timing_.flags & VideoSendTiming::kTriggeredBySize) {
+ ++stats->huge_frames_sent;
+ if (!last_outlier_timestamp_ ||
+ *last_outlier_timestamp_ < encoded_image.capture_time_ms_) {
+ last_outlier_timestamp_.emplace(encoded_image.capture_time_ms_);
+ ++stats_.huge_frames_sent;
+ }
+ }
+
+ media_byte_rate_tracker_.AddSamples(encoded_image.size());
+
+ if (uma_container_->InsertEncodedFrame(encoded_image, simulcast_idx)) {
+ // First frame seen with this timestamp, track overall fps.
+ encoded_frame_rate_tracker_.AddSamples(1);
+ }
+  // is_top_spatial_layer pertains only to SVC; it is always true for
+  // simulcast.
+ if (is_top_spatial_layer)
+ encoded_frame_rate_trackers_[ssrc]->AddSamples(1);
+
+ absl::optional<int> downscales =
+ adaptation_limitations_.MaskedQualityCounts().resolution_adaptations;
+ stats_.bw_limited_resolution |=
+ (downscales.has_value() && downscales.value() > 0);
+
+ if (downscales.has_value()) {
+ uma_container_->quality_limited_frame_counter_.Add(downscales.value() > 0);
+ if (downscales.value() > 0)
+ uma_container_->quality_downscales_counter_.Add(downscales.value());
+ }
+}
+
+void SendStatisticsProxy::OnEncoderImplementationChanged(
+ EncoderImplementation implementation) {
+ MutexLock lock(&mutex_);
+ encoder_changed_ = EncoderChangeEvent{stats_.encoder_implementation_name,
+ implementation.name};
+ stats_.encoder_implementation_name = implementation.name;
+ stats_.power_efficient_encoder = implementation.is_hardware_accelerated;
+}
+
+int SendStatisticsProxy::GetInputFrameRate() const {
+ MutexLock lock(&mutex_);
+ return round(uma_container_->input_frame_rate_tracker_.ComputeRate());
+}
+
+int SendStatisticsProxy::GetSendFrameRate() const {
+ MutexLock lock(&mutex_);
+ return round(encoded_frame_rate_tracker_.ComputeRate());
+}
+
+void SendStatisticsProxy::OnIncomingFrame(int width, int height) {
+ MutexLock lock(&mutex_);
+ uma_container_->input_frame_rate_tracker_.AddSamples(1);
+ uma_container_->input_fps_counter_.Add(1);
+ uma_container_->input_width_counter_.Add(width);
+ uma_container_->input_height_counter_.Add(height);
+ if (adaptation_limitations_.MaskedCpuCounts()
+ .resolution_adaptations.has_value()) {
+ uma_container_->cpu_limited_frame_counter_.Add(
+ stats_.cpu_limited_resolution);
+ }
+ if (encoded_frame_rate_tracker_.TotalSampleCount() == 0) {
+    // Set the start time now instead of when the first key frame is encoded,
+    // to avoid an overly high initial estimate.
+ encoded_frame_rate_tracker_.AddSamples(0);
+ }
+}
+
+void SendStatisticsProxy::OnFrameDropped(DropReason reason) {
+ MutexLock lock(&mutex_);
+ switch (reason) {
+ case DropReason::kSource:
+ ++stats_.frames_dropped_by_capturer;
+ break;
+ case DropReason::kEncoderQueue:
+ ++stats_.frames_dropped_by_encoder_queue;
+ break;
+ case DropReason::kEncoder:
+ ++stats_.frames_dropped_by_encoder;
+ break;
+ case DropReason::kMediaOptimization:
+ ++stats_.frames_dropped_by_rate_limiter;
+ break;
+ case DropReason::kCongestionWindow:
+ ++stats_.frames_dropped_by_congestion_window;
+ break;
+ }
+}
+
+void SendStatisticsProxy::ClearAdaptationStats() {
+ MutexLock lock(&mutex_);
+ adaptation_limitations_.set_cpu_counts(VideoAdaptationCounters());
+ adaptation_limitations_.set_quality_counts(VideoAdaptationCounters());
+ UpdateAdaptationStats();
+}
+
+void SendStatisticsProxy::UpdateAdaptationSettings(
+ VideoStreamEncoderObserver::AdaptationSettings cpu_settings,
+ VideoStreamEncoderObserver::AdaptationSettings quality_settings) {
+ MutexLock lock(&mutex_);
+ adaptation_limitations_.UpdateMaskingSettings(cpu_settings, quality_settings);
+ SetAdaptTimer(adaptation_limitations_.MaskedCpuCounts(),
+ &uma_container_->cpu_adapt_timer_);
+ SetAdaptTimer(adaptation_limitations_.MaskedQualityCounts(),
+ &uma_container_->quality_adapt_timer_);
+ UpdateAdaptationStats();
+}
+
+void SendStatisticsProxy::OnAdaptationChanged(
+ VideoAdaptationReason reason,
+ const VideoAdaptationCounters& cpu_counters,
+ const VideoAdaptationCounters& quality_counters) {
+ MutexLock lock(&mutex_);
+
+ MaskedAdaptationCounts receiver =
+ adaptation_limitations_.MaskedQualityCounts();
+ adaptation_limitations_.set_cpu_counts(cpu_counters);
+ adaptation_limitations_.set_quality_counts(quality_counters);
+ switch (reason) {
+ case VideoAdaptationReason::kCpu:
+ ++stats_.number_of_cpu_adapt_changes;
+ break;
+ case VideoAdaptationReason::kQuality:
+ TryUpdateInitialQualityResolutionAdaptUp(
+ receiver.resolution_adaptations,
+ adaptation_limitations_.MaskedQualityCounts().resolution_adaptations);
+ ++stats_.number_of_quality_adapt_changes;
+ break;
+ }
+ UpdateAdaptationStats();
+}
+
+void SendStatisticsProxy::UpdateAdaptationStats() {
+ auto cpu_counts = adaptation_limitations_.MaskedCpuCounts();
+ auto quality_counts = adaptation_limitations_.MaskedQualityCounts();
+
+ bool is_cpu_limited = cpu_counts.resolution_adaptations > 0 ||
+ cpu_counts.num_framerate_reductions > 0;
+ bool is_bandwidth_limited = quality_counts.resolution_adaptations > 0 ||
+ quality_counts.num_framerate_reductions > 0 ||
+ bw_limited_layers_ || internal_encoder_scaler_;
+ if (is_bandwidth_limited) {
+ // We may be both CPU limited and bandwidth limited at the same time but
+ // there is no way to express this in standardized stats. Heuristically,
+ // bandwidth is more likely to be a limiting factor than CPU, and more
+ // likely to vary over time, so only when we aren't bandwidth limited do we
+ // want to know about our CPU being the bottleneck.
+ quality_limitation_reason_tracker_.SetReason(
+ QualityLimitationReason::kBandwidth);
+ } else if (is_cpu_limited) {
+ quality_limitation_reason_tracker_.SetReason(QualityLimitationReason::kCpu);
+ } else {
+ quality_limitation_reason_tracker_.SetReason(
+ QualityLimitationReason::kNone);
+ }
+
+ stats_.cpu_limited_resolution = cpu_counts.resolution_adaptations > 0;
+ stats_.cpu_limited_framerate = cpu_counts.num_framerate_reductions > 0;
+ stats_.bw_limited_resolution = quality_counts.resolution_adaptations > 0;
+ stats_.bw_limited_framerate = quality_counts.num_framerate_reductions > 0;
+  // If the bitrate allocator has disabled some layers, frame rate or
+  // resolution is limited depending on the encoder configuration.
+ if (bw_limited_layers_) {
+ switch (content_type_) {
+ case VideoEncoderConfig::ContentType::kRealtimeVideo: {
+ stats_.bw_limited_resolution = true;
+ break;
+ }
+ case VideoEncoderConfig::ContentType::kScreen: {
+ stats_.bw_limited_framerate = true;
+ break;
+ }
+ }
+ }
+ if (internal_encoder_scaler_) {
+ stats_.bw_limited_resolution = true;
+ }
+
+ stats_.quality_limitation_reason =
+ quality_limitation_reason_tracker_.current_reason();
+
+ // `stats_.quality_limitation_durations_ms` depends on the current time
+ // when it is polled; it is updated in SendStatisticsProxy::GetStats().
+}
+
+void SendStatisticsProxy::OnBitrateAllocationUpdated(
+ const VideoCodec& codec,
+ const VideoBitrateAllocation& allocation) {
+ int num_spatial_layers = 0;
+ for (int i = 0; i < kMaxSpatialLayers; i++) {
+ if (codec.spatialLayers[i].active) {
+ num_spatial_layers++;
+ }
+ }
+ int num_simulcast_streams = 0;
+ for (int i = 0; i < kMaxSimulcastStreams; i++) {
+ if (codec.simulcastStream[i].active) {
+ num_simulcast_streams++;
+ }
+ }
+
+ std::array<bool, kMaxSpatialLayers> spatial_layers;
+ for (int i = 0; i < kMaxSpatialLayers; i++) {
+ spatial_layers[i] = (allocation.GetSpatialLayerSum(i) > 0);
+ }
+
+ MutexLock lock(&mutex_);
+
+ bw_limited_layers_ = allocation.is_bw_limited();
+ UpdateAdaptationStats();
+
+ if (spatial_layers != last_spatial_layer_use_) {
+    // If the number of spatial layers has changed, the resolution change is
+    // not due to quality limitations; it is because the configuration
+    // changed.
+ if (last_num_spatial_layers_ == num_spatial_layers &&
+ last_num_simulcast_streams_ == num_simulcast_streams) {
+ ++stats_.quality_limitation_resolution_changes;
+ }
+ last_spatial_layer_use_ = spatial_layers;
+ }
+ last_num_spatial_layers_ = num_spatial_layers;
+ last_num_simulcast_streams_ = num_simulcast_streams;
+}
+
+// Informs the observer whether an internal encoder scaler has reduced the
+// video resolution. `is_scaled` indicates whether the video is scaled down.
+void SendStatisticsProxy::OnEncoderInternalScalerUpdate(bool is_scaled) {
+ MutexLock lock(&mutex_);
+ internal_encoder_scaler_ = is_scaled;
+ UpdateAdaptationStats();
+}
+
+// TODO(asapersson): Include fps changes.
+void SendStatisticsProxy::OnInitialQualityResolutionAdaptDown() {
+ MutexLock lock(&mutex_);
+ ++uma_container_->initial_quality_changes_.down;
+}
+
+void SendStatisticsProxy::TryUpdateInitialQualityResolutionAdaptUp(
+ absl::optional<int> old_quality_downscales,
+ absl::optional<int> updated_quality_downscales) {
+ if (uma_container_->initial_quality_changes_.down == 0)
+ return;
+
+ if (old_quality_downscales.has_value() &&
+ old_quality_downscales.value() > 0 &&
+ updated_quality_downscales.value_or(-1) <
+ old_quality_downscales.value()) {
+ // Adapting up in quality.
+ if (uma_container_->initial_quality_changes_.down >
+ uma_container_->initial_quality_changes_.up) {
+ ++uma_container_->initial_quality_changes_.up;
+ }
+ }
+}
+
+void SendStatisticsProxy::SetAdaptTimer(const MaskedAdaptationCounts& counts,
+ StatsTimer* timer) {
+ if (counts.resolution_adaptations || counts.num_framerate_reductions) {
+ // Adaptation enabled.
+ if (!stats_.suspended)
+ timer->Start(clock_->TimeInMilliseconds());
+ return;
+ }
+ timer->Stop(clock_->TimeInMilliseconds());
+}
+
+void SendStatisticsProxy::RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->rtcp_packet_type_counts = packet_counter;
+ if (uma_container_->first_rtcp_stats_time_ms_ == -1)
+ uma_container_->first_rtcp_stats_time_ms_ = clock_->TimeInMilliseconds();
+}
+
+void SendStatisticsProxy::OnReportBlockDataUpdated(
+ ReportBlockData report_block_data) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats =
+ GetStatsEntry(report_block_data.report_block().source_ssrc);
+ if (!stats)
+ return;
+ const RTCPReportBlock& report_block = report_block_data.report_block();
+ uma_container_->report_block_stats_.Store(
+ /*ssrc=*/report_block.source_ssrc,
+ /*packets_lost=*/report_block.packets_lost,
+ /*extended_highest_sequence_number=*/
+ report_block.extended_highest_sequence_number);
+
+ stats->report_block_data = std::move(report_block_data);
+}
+
+void SendStatisticsProxy::DataCountersUpdated(
+ const StreamDataCounters& counters,
+ uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ RTC_DCHECK(stats) << "DataCountersUpdated reported for unknown ssrc " << ssrc;
+
+ if (stats->type == VideoSendStream::StreamStats::StreamType::kFlexfec) {
+ // The same counters are reported for both the media ssrc and flexfec ssrc.
+ // Bitrate stats are summed for all SSRCs. Use fec stats from media update.
+ return;
+ }
+
+ stats->rtp_stats = counters;
+ if (uma_container_->first_rtp_stats_time_ms_ == -1) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ uma_container_->first_rtp_stats_time_ms_ = now_ms;
+ uma_container_->cpu_adapt_timer_.Restart(now_ms);
+ uma_container_->quality_adapt_timer_.Restart(now_ms);
+ }
+
+ uma_container_->total_byte_counter_.Set(counters.transmitted.TotalBytes(),
+ ssrc);
+ uma_container_->padding_byte_counter_.Set(counters.transmitted.padding_bytes,
+ ssrc);
+ uma_container_->retransmit_byte_counter_.Set(
+ counters.retransmitted.TotalBytes(), ssrc);
+ uma_container_->fec_byte_counter_.Set(counters.fec.TotalBytes(), ssrc);
+ switch (stats->type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ uma_container_->media_byte_counter_.Set(counters.MediaPayloadBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ uma_container_->rtx_byte_counter_.Set(counters.transmitted.TotalBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ break;
+ }
+}
+
+void SendStatisticsProxy::Notify(uint32_t total_bitrate_bps,
+ uint32_t retransmit_bitrate_bps,
+ uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->total_bitrate_bps = total_bitrate_bps;
+ stats->retransmit_bitrate_bps = retransmit_bitrate_bps;
+}
+
+void SendStatisticsProxy::FrameCountUpdated(const FrameCounts& frame_counts,
+ uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->frame_counts = frame_counts;
+}
+
+void SendStatisticsProxy::SendSideDelayUpdated(int avg_delay_ms,
+ int max_delay_ms,
+ uint32_t ssrc) {
+ MutexLock lock(&mutex_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+ stats->avg_delay_ms = avg_delay_ms;
+ stats->max_delay_ms = max_delay_ms;
+
+ uma_container_->delay_counter_.Add(avg_delay_ms);
+ uma_container_->max_delay_counter_.Add(max_delay_ms);
+}
+
+void SendStatisticsProxy::StatsTimer::Start(int64_t now_ms) {
+ if (start_ms == -1)
+ start_ms = now_ms;
+}
+
+void SendStatisticsProxy::StatsTimer::Stop(int64_t now_ms) {
+ if (start_ms != -1) {
+ total_ms += now_ms - start_ms;
+ start_ms = -1;
+ }
+}
+
+void SendStatisticsProxy::StatsTimer::Restart(int64_t now_ms) {
+ total_ms = 0;
+ if (start_ms != -1)
+ start_ms = now_ms;
+}
+
+void SendStatisticsProxy::SampleCounter::Add(int sample) {
+ sum += sample;
+ ++num_samples;
+}
+
+int SendStatisticsProxy::SampleCounter::Avg(
+ int64_t min_required_samples) const {
+ if (num_samples < min_required_samples || num_samples == 0)
+ return -1;
+ return static_cast<int>((sum + (num_samples / 2)) / num_samples);
+}
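+
+// Editorial note: the "+ (num_samples / 2)" term above rounds to the nearest
+// integer, e.g. sum = 7 over 2 samples yields (7 + 1) / 2 = 4 rather than
+// truncating to 3.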
+
+void SendStatisticsProxy::BoolSampleCounter::Add(bool sample) {
+ if (sample)
+ ++sum;
+ ++num_samples;
+}
+
+void SendStatisticsProxy::BoolSampleCounter::Add(bool sample, int64_t count) {
+ if (sample)
+ sum += count;
+ num_samples += count;
+}
+int SendStatisticsProxy::BoolSampleCounter::Percent(
+ int64_t min_required_samples) const {
+ return Fraction(min_required_samples, 100.0f);
+}
+
+int SendStatisticsProxy::BoolSampleCounter::Permille(
+ int64_t min_required_samples) const {
+ return Fraction(min_required_samples, 1000.0f);
+}
+
+int SendStatisticsProxy::BoolSampleCounter::Fraction(
+ int64_t min_required_samples,
+ float multiplier) const {
+ if (num_samples < min_required_samples || num_samples == 0)
+ return -1;
+ return static_cast<int>((sum * multiplier / num_samples) + 0.5f);
+}
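+
+// Editorial note: Fraction() scales before rounding, so with
+// min_required_samples satisfied, 1 positive sample out of 3 reports
+// Percent() = int(1 * 100.0f / 3 + 0.5f) = 33 and Permille() = 333.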
+
+SendStatisticsProxy::MaskedAdaptationCounts
+SendStatisticsProxy::Adaptations::MaskedCpuCounts() const {
+ return Mask(cpu_counts_, cpu_settings_);
+}
+
+SendStatisticsProxy::MaskedAdaptationCounts
+SendStatisticsProxy::Adaptations::MaskedQualityCounts() const {
+ return Mask(quality_counts_, quality_settings_);
+}
+
+void SendStatisticsProxy::Adaptations::set_cpu_counts(
+ const VideoAdaptationCounters& cpu_counts) {
+ cpu_counts_ = cpu_counts;
+}
+
+void SendStatisticsProxy::Adaptations::set_quality_counts(
+ const VideoAdaptationCounters& quality_counts) {
+ quality_counts_ = quality_counts;
+}
+
+VideoAdaptationCounters SendStatisticsProxy::Adaptations::cpu_counts() const {
+ return cpu_counts_;
+}
+
+VideoAdaptationCounters SendStatisticsProxy::Adaptations::quality_counts()
+ const {
+ return quality_counts_;
+}
+
+void SendStatisticsProxy::Adaptations::UpdateMaskingSettings(
+ VideoStreamEncoderObserver::AdaptationSettings cpu_settings,
+ VideoStreamEncoderObserver::AdaptationSettings quality_settings) {
+ cpu_settings_ = std::move(cpu_settings);
+ quality_settings_ = std::move(quality_settings);
+}
+
+SendStatisticsProxy::MaskedAdaptationCounts
+SendStatisticsProxy::Adaptations::Mask(
+ const VideoAdaptationCounters& counters,
+ const VideoStreamEncoderObserver::AdaptationSettings& settings) const {
+ MaskedAdaptationCounts masked_counts;
+ if (settings.resolution_scaling_enabled) {
+ masked_counts.resolution_adaptations = counters.resolution_adaptations;
+ }
+ if (settings.framerate_scaling_enabled) {
+ masked_counts.num_framerate_reductions = counters.fps_adaptations;
+ }
+ return masked_counts;
+}
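+
+// Illustrative note (not part of the upstream change): Mask() only reports
+// counts for adaptation dimensions that are enabled. For example, counters
+// {resolution: 2, fps: 1} masked with settings where only resolution scaling
+// is enabled yield {resolution_adaptations: 2,
+// num_framerate_reductions: absl::nullopt}.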
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/send_statistics_proxy.h b/third_party/libwebrtc/video/send_statistics_proxy.h
new file mode 100644
index 0000000000..4203b1c873
--- /dev/null
+++ b/third_party/libwebrtc/video/send_statistics_proxy.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_SEND_STATISTICS_PROXY_H_
+#define VIDEO_SEND_STATISTICS_PROXY_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/video/video_codec_constants.h"
+#include "call/video_send_stream.h"
+#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/rate_tracker.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/config/video_encoder_config.h"
+#include "video/quality_limitation_reason_tracker.h"
+#include "video/report_block_stats.h"
+#include "video/stats_counter.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+class SendStatisticsProxy : public VideoStreamEncoderObserver,
+ public ReportBlockDataObserver,
+ public RtcpPacketTypeCounterObserver,
+ public StreamDataCountersCallback,
+ public BitrateStatisticsObserver,
+ public FrameCountObserver,
+ public SendSideDelayObserver {
+ public:
+ static const int kStatsTimeoutMs;
+  // Number of samples required to be collected before a metric is added
+  // to an RTC histogram.
+ static const int kMinRequiredMetricsSamples = 200;
+
+ SendStatisticsProxy(Clock* clock,
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type,
+ const FieldTrialsView& field_trials);
+ ~SendStatisticsProxy() override;
+
+ virtual VideoSendStream::Stats GetStats();
+
+ void OnSendEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_info) override;
+
+ void OnEncoderImplementationChanged(
+ EncoderImplementation implementation) override;
+
+ // Used to update incoming frame rate.
+ void OnIncomingFrame(int width, int height) override;
+
+ // Dropped frame stats.
+ void OnFrameDropped(DropReason) override;
+
+ // Adaptation stats.
+ void OnAdaptationChanged(
+ VideoAdaptationReason reason,
+ const VideoAdaptationCounters& cpu_counters,
+ const VideoAdaptationCounters& quality_counters) override;
+ void ClearAdaptationStats() override;
+ void UpdateAdaptationSettings(AdaptationSettings cpu_settings,
+ AdaptationSettings quality_settings) override;
+
+ void OnBitrateAllocationUpdated(
+ const VideoCodec& codec,
+ const VideoBitrateAllocation& allocation) override;
+
+ void OnEncoderInternalScalerUpdate(bool is_scaled) override;
+
+ void OnMinPixelLimitReached() override;
+ void OnInitialQualityResolutionAdaptDown() override;
+
+ void OnSuspendChange(bool is_suspended) override;
+ void OnInactiveSsrc(uint32_t ssrc);
+
+ // Used to indicate change in content type, which may require a change in
+ // how stats are collected.
+ void OnEncoderReconfigured(const VideoEncoderConfig& encoder_config,
+ const std::vector<VideoStream>& streams) override;
+
+ // Used to update the encoder target rate.
+ void OnSetEncoderTargetRate(uint32_t bitrate_bps);
+
+ // Implements CpuOveruseMetricsObserver.
+ void OnEncodedFrameTimeMeasured(int encode_time_ms,
+ int encode_usage_percent) override;
+
+ int GetInputFrameRate() const override;
+ int GetSendFrameRate() const;
+
+ protected:
+ // From ReportBlockDataObserver.
+ void OnReportBlockDataUpdated(ReportBlockData report_block_data) override;
+ // From RtcpPacketTypeCounterObserver.
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override;
+ // From StreamDataCountersCallback.
+ void DataCountersUpdated(const StreamDataCounters& counters,
+ uint32_t ssrc) override;
+
+ // From BitrateStatisticsObserver.
+ void Notify(uint32_t total_bitrate_bps,
+ uint32_t retransmit_bitrate_bps,
+ uint32_t ssrc) override;
+
+ // From FrameCountObserver.
+ void FrameCountUpdated(const FrameCounts& frame_counts,
+ uint32_t ssrc) override;
+
+ // From SendSideDelayObserver.
+ void SendSideDelayUpdated(int avg_delay_ms,
+ int max_delay_ms,
+ uint32_t ssrc) override;
+
+ private:
+ class SampleCounter {
+ public:
+ SampleCounter() : sum(0), num_samples(0) {}
+ ~SampleCounter() {}
+ void Add(int sample);
+ int Avg(int64_t min_required_samples) const;
+
+ private:
+ int64_t sum;
+ int64_t num_samples;
+ };
+ class BoolSampleCounter {
+ public:
+ BoolSampleCounter() : sum(0), num_samples(0) {}
+ ~BoolSampleCounter() {}
+ void Add(bool sample);
+ void Add(bool sample, int64_t count);
+ int Percent(int64_t min_required_samples) const;
+ int Permille(int64_t min_required_samples) const;
+
+ private:
+ int Fraction(int64_t min_required_samples, float multiplier) const;
+ int64_t sum;
+ int64_t num_samples;
+ };
+ struct StatsUpdateTimes {
+ StatsUpdateTimes() : resolution_update_ms(0), bitrate_update_ms(0) {}
+ int64_t resolution_update_ms;
+ int64_t bitrate_update_ms;
+ };
+ struct TargetRateUpdates {
+ TargetRateUpdates()
+ : pause_resume_events(0), last_paused_or_resumed(false), last_ms(-1) {}
+ int pause_resume_events;
+ bool last_paused_or_resumed;
+ int64_t last_ms;
+ };
+ struct FallbackEncoderInfo {
+ FallbackEncoderInfo();
+ bool is_possible = true;
+ bool is_active = false;
+ int on_off_events = 0;
+ int64_t elapsed_ms = 0;
+ absl::optional<int64_t> last_update_ms;
+ const int max_frame_diff_ms = 2000;
+ };
+ struct FallbackEncoderInfoDisabled {
+ bool is_possible = true;
+ bool min_pixel_limit_reached = false;
+ };
+ struct StatsTimer {
+ void Start(int64_t now_ms);
+ void Stop(int64_t now_ms);
+ void Restart(int64_t now_ms);
+ int64_t start_ms = -1;
+ int64_t total_ms = 0;
+ };
+ struct QpCounters {
+ SampleCounter vp8; // QP range: 0-127.
+ SampleCounter vp9; // QP range: 0-255.
+ SampleCounter h264; // QP range: 0-51.
+ };
+ struct AdaptChanges {
+ int down = 0;
+ int up = 0;
+ };
+
+  // Map holding encoded frames (mapped by timestamp).
+  // If simulcast layers are encoded on different threads, there is no
+  // guarantee that all layers of one frame are encoded before the next frame
+  // starts.
+ struct TimestampOlderThan {
+ bool operator()(uint32_t ts1, uint32_t ts2) const {
+ return IsNewerTimestamp(ts2, ts1);
+ }
+ };
+ struct Frame {
+ Frame(int64_t send_ms, uint32_t width, uint32_t height, int simulcast_idx)
+ : send_ms(send_ms),
+ max_width(width),
+ max_height(height),
+ max_simulcast_idx(simulcast_idx) {}
+ const int64_t
+ send_ms; // Time when first frame with this timestamp is sent.
+ uint32_t max_width; // Max width with this timestamp.
+ uint32_t max_height; // Max height with this timestamp.
+ int max_simulcast_idx; // Max simulcast index with this timestamp.
+ };
+ typedef std::map<uint32_t, Frame, TimestampOlderThan> EncodedFrameMap;
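+  // Illustrative note (not part of the upstream change): TimestampOlderThan
+  // orders keys by RTP stream order via IsNewerTimestamp(), which accounts
+  // for 32-bit timestamp wraparound. E.g. timestamp 0x00000010 sorts after
+  // 0xFFFFFFF0, since it is the newer timestamp once the counter wraps.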
+
+ void PurgeOldStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ VideoSendStream::StreamStats* GetStatsEntry(uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ struct MaskedAdaptationCounts {
+ absl::optional<int> resolution_adaptations = absl::nullopt;
+ absl::optional<int> num_framerate_reductions = absl::nullopt;
+ };
+
+ struct Adaptations {
+ public:
+ MaskedAdaptationCounts MaskedCpuCounts() const;
+ MaskedAdaptationCounts MaskedQualityCounts() const;
+
+ void set_cpu_counts(const VideoAdaptationCounters& cpu_counts);
+ void set_quality_counts(const VideoAdaptationCounters& quality_counts);
+
+ VideoAdaptationCounters cpu_counts() const;
+ VideoAdaptationCounters quality_counts() const;
+
+ void UpdateMaskingSettings(AdaptationSettings cpu_settings,
+ AdaptationSettings quality_settings);
+
+ private:
+ VideoAdaptationCounters cpu_counts_;
+ AdaptationSettings cpu_settings_;
+ VideoAdaptationCounters quality_counts_;
+ AdaptationSettings quality_settings_;
+
+ MaskedAdaptationCounts Mask(const VideoAdaptationCounters& counters,
+ const AdaptationSettings& settings) const;
+ };
+
+ void SetAdaptTimer(const MaskedAdaptationCounts& counts, StatsTimer* timer)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void UpdateAdaptationStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void TryUpdateInitialQualityResolutionAdaptUp(
+ absl::optional<int> old_quality_downscales,
+ absl::optional<int> updated_quality_downscales)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ void UpdateEncoderFallbackStats(const CodecSpecificInfo* codec_info,
+ int pixels,
+ int simulcast_index)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void UpdateFallbackDisabledStats(const CodecSpecificInfo* codec_info,
+ int pixels,
+ int simulcast_index)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ Clock* const clock_;
+ const std::string payload_name_;
+ const RtpConfig rtp_config_;
+ const absl::optional<int> fallback_max_pixels_;
+ const absl::optional<int> fallback_max_pixels_disabled_;
+ mutable Mutex mutex_;
+ VideoEncoderConfig::ContentType content_type_ RTC_GUARDED_BY(mutex_);
+ const int64_t start_ms_;
+ VideoSendStream::Stats stats_ RTC_GUARDED_BY(mutex_);
+ std::map<uint32_t, StatsUpdateTimes> update_times_ RTC_GUARDED_BY(mutex_);
+ rtc::ExpFilter encode_time_ RTC_GUARDED_BY(mutex_);
+ QualityLimitationReasonTracker quality_limitation_reason_tracker_
+ RTC_GUARDED_BY(mutex_);
+ rtc::RateTracker media_byte_rate_tracker_ RTC_GUARDED_BY(mutex_);
+ rtc::RateTracker encoded_frame_rate_tracker_ RTC_GUARDED_BY(mutex_);
+ // Rate trackers mapped by ssrc.
+ std::map<uint32_t, std::unique_ptr<rtc::RateTracker>>
+ encoded_frame_rate_trackers_ RTC_GUARDED_BY(mutex_);
+
+ absl::optional<int64_t> last_outlier_timestamp_ RTC_GUARDED_BY(mutex_);
+
+ int last_num_spatial_layers_ RTC_GUARDED_BY(mutex_);
+ int last_num_simulcast_streams_ RTC_GUARDED_BY(mutex_);
+ std::array<bool, kMaxSpatialLayers> last_spatial_layer_use_
+ RTC_GUARDED_BY(mutex_);
+ // Indicates if the latest bitrate allocation had layers disabled by low
+ // available bandwidth.
+ bool bw_limited_layers_ RTC_GUARDED_BY(mutex_);
+  // Indicates if the encoder internally downscales the input image.
+ bool internal_encoder_scaler_ RTC_GUARDED_BY(mutex_);
+ Adaptations adaptation_limitations_ RTC_GUARDED_BY(mutex_);
+
+ struct EncoderChangeEvent {
+ std::string previous_encoder_implementation;
+ std::string new_encoder_implementation;
+ };
+ // Stores the last change in encoder implementation in an optional, so that
+ // the event can be consumed.
+ absl::optional<EncoderChangeEvent> encoder_changed_;
+
+ // Contains stats used for UMA histograms. These stats will be reset if
+ // content type changes between real-time video and screenshare, since these
+ // will be reported separately.
+ struct UmaSamplesContainer {
+ UmaSamplesContainer(const char* prefix,
+ const VideoSendStream::Stats& start_stats,
+ Clock* clock);
+ ~UmaSamplesContainer();
+
+ void UpdateHistograms(const RtpConfig& rtp_config,
+ const VideoSendStream::Stats& current_stats);
+
+ void InitializeBitrateCounters(const VideoSendStream::Stats& stats);
+
+ bool InsertEncodedFrame(const EncodedImage& encoded_frame,
+ int simulcast_idx);
+ void RemoveOld(int64_t now_ms);
+
+ const std::string uma_prefix_;
+ Clock* const clock_;
+ SampleCounter input_width_counter_;
+ SampleCounter input_height_counter_;
+ SampleCounter sent_width_counter_;
+ SampleCounter sent_height_counter_;
+ SampleCounter encode_time_counter_;
+ BoolSampleCounter key_frame_counter_;
+ BoolSampleCounter quality_limited_frame_counter_;
+ SampleCounter quality_downscales_counter_;
+ BoolSampleCounter cpu_limited_frame_counter_;
+ BoolSampleCounter bw_limited_frame_counter_;
+ SampleCounter bw_resolutions_disabled_counter_;
+ SampleCounter delay_counter_;
+ SampleCounter max_delay_counter_;
+ rtc::RateTracker input_frame_rate_tracker_;
+ RateCounter input_fps_counter_;
+ RateCounter sent_fps_counter_;
+ RateAccCounter total_byte_counter_;
+ RateAccCounter media_byte_counter_;
+ RateAccCounter rtx_byte_counter_;
+ RateAccCounter padding_byte_counter_;
+ RateAccCounter retransmit_byte_counter_;
+ RateAccCounter fec_byte_counter_;
+ int64_t first_rtcp_stats_time_ms_;
+ int64_t first_rtp_stats_time_ms_;
+ StatsTimer cpu_adapt_timer_;
+ StatsTimer quality_adapt_timer_;
+ BoolSampleCounter paused_time_counter_;
+ TargetRateUpdates target_rate_updates_;
+ BoolSampleCounter fallback_active_counter_;
+ FallbackEncoderInfo fallback_info_;
+ FallbackEncoderInfoDisabled fallback_info_disabled_;
+ ReportBlockStats report_block_stats_;
+ const VideoSendStream::Stats start_stats_;
+    size_t num_streams_;  // Number of streams configured for the encoder.
+ size_t num_pixels_highest_stream_;
+ EncodedFrameMap encoded_frames_;
+ AdaptChanges initial_quality_changes_;
+
+ std::map<int, QpCounters>
+ qp_counters_; // QP counters mapped by spatial idx.
+ };
+
+ std::unique_ptr<UmaSamplesContainer> uma_container_ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+#endif // VIDEO_SEND_STATISTICS_PROXY_H_
diff --git a/third_party/libwebrtc/video/send_statistics_proxy_unittest.cc b/third_party/libwebrtc/video/send_statistics_proxy_unittest.cc
new file mode 100644
index 0000000000..af3b0208e2
--- /dev/null
+++ b/third_party/libwebrtc/video/send_statistics_proxy_unittest.cc
@@ -0,0 +1,3123 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_statistics_proxy.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_codec_type.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/fake_clock.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "video/config/video_encoder_config.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Optional;
+
+const uint32_t kFirstSsrc = 17;
+const uint32_t kSecondSsrc = 42;
+const uint32_t kFirstRtxSsrc = 18;
+const uint32_t kSecondRtxSsrc = 43;
+const uint32_t kFlexFecSsrc = 55;
+const int kFpsPeriodicIntervalMs = 2000;
+const int kWidth = 640;
+const int kHeight = 480;
+const int kQpIdx0 = 21;
+const int kQpIdx1 = 39;
+const int kRtpClockRateHz = 90000;
+const CodecSpecificInfo kDefaultCodecInfo = []() {
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ return codec_info;
+}();
+
+const VideoStreamEncoderObserver::AdaptationSettings kScalingEnabled(true,
+ true);
+const VideoStreamEncoderObserver::AdaptationSettings kFramerateScalingDisabled(
+ true,
+ false);
+const VideoStreamEncoderObserver::AdaptationSettings kResolutionScalingDisabled(
+ false,
+ true);
+const VideoStreamEncoderObserver::AdaptationSettings kScalingDisabled;
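+
+// Illustrative note (not part of the upstream change): the two constructor
+// arguments appear to map to (resolution_scaling_enabled,
+// framerate_scaling_enabled), so e.g. kFramerateScalingDisabled keeps
+// resolution scaling enabled while disabling framerate scaling.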
+} // namespace
+
+class SendStatisticsProxyTest : public ::testing::Test {
+ public:
+ SendStatisticsProxyTest() : SendStatisticsProxyTest("") {}
+ explicit SendStatisticsProxyTest(const std::string& field_trials)
+ : override_field_trials_(field_trials),
+ fake_clock_(1234),
+ config_(GetTestConfig()) {}
+ virtual ~SendStatisticsProxyTest() {}
+
+ protected:
+ virtual void SetUp() {
+ metrics::Reset();
+ statistics_proxy_.reset(
+ new SendStatisticsProxy(&fake_clock_, GetTestConfig(),
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ override_field_trials_));
+ expected_ = VideoSendStream::Stats();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ expected_.substreams[ssrc].type =
+ VideoSendStream::StreamStats::StreamType::kMedia;
+ }
+ for (size_t i = 0; i < config_.rtp.rtx.ssrcs.size(); ++i) {
+ uint32_t ssrc = config_.rtp.rtx.ssrcs[i];
+ expected_.substreams[ssrc].type =
+ VideoSendStream::StreamStats::StreamType::kRtx;
+ expected_.substreams[ssrc].referenced_media_ssrc = config_.rtp.ssrcs[i];
+ }
+ }
+
+ VideoSendStream::Config GetTestConfig() {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ config.rtp.ssrcs.push_back(kSecondSsrc);
+ config.rtp.rtx.ssrcs.push_back(kFirstRtxSsrc);
+ config.rtp.rtx.ssrcs.push_back(kSecondRtxSsrc);
+ config.rtp.ulpfec.red_payload_type = 17;
+ return config;
+ }
+
+ VideoSendStream::Config GetTestConfigWithFlexFec() {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ config.rtp.ssrcs.push_back(kSecondSsrc);
+ config.rtp.rtx.ssrcs.push_back(kFirstRtxSsrc);
+ config.rtp.rtx.ssrcs.push_back(kSecondRtxSsrc);
+ config.rtp.flexfec.payload_type = 50;
+ config.rtp.flexfec.ssrc = kFlexFecSsrc;
+ config.rtp.flexfec.protected_media_ssrcs = {kFirstSsrc};
+ return config;
+ }
+
+ VideoSendStream::StreamStats GetStreamStats(uint32_t ssrc) {
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ std::map<uint32_t, VideoSendStream::StreamStats>::iterator it =
+ stats.substreams.find(ssrc);
+ EXPECT_NE(it, stats.substreams.end());
+ return it->second;
+ }
+
+ void UpdateDataCounters(uint32_t ssrc) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ proxy->DataCountersUpdated(counters, ssrc);
+ }
+
+ void ExpectEqual(VideoSendStream::Stats one, VideoSendStream::Stats other) {
+ EXPECT_EQ(one.frames, other.frames);
+ EXPECT_EQ(one.input_frame_rate, other.input_frame_rate);
+ EXPECT_EQ(one.encode_frame_rate, other.encode_frame_rate);
+ EXPECT_EQ(one.media_bitrate_bps, other.media_bitrate_bps);
+ EXPECT_EQ(one.suspended, other.suspended);
+
+ EXPECT_EQ(one.substreams.size(), other.substreams.size());
+ for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
+ one.substreams.begin();
+ it != one.substreams.end(); ++it) {
+ std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator
+ corresponding_it = other.substreams.find(it->first);
+ ASSERT_TRUE(corresponding_it != other.substreams.end());
+ const VideoSendStream::StreamStats& a = it->second;
+ const VideoSendStream::StreamStats& b = corresponding_it->second;
+
+ EXPECT_EQ(a.type, b.type);
+ EXPECT_EQ(a.frame_counts.key_frames, b.frame_counts.key_frames);
+ EXPECT_EQ(a.frame_counts.delta_frames, b.frame_counts.delta_frames);
+      EXPECT_EQ(a.total_bitrate_bps, b.total_bitrate_bps);
+      EXPECT_EQ(a.retransmit_bitrate_bps, b.retransmit_bitrate_bps);
+ EXPECT_EQ(a.avg_delay_ms, b.avg_delay_ms);
+ EXPECT_EQ(a.max_delay_ms, b.max_delay_ms);
+
+ EXPECT_EQ(a.rtp_stats.transmitted.payload_bytes,
+ b.rtp_stats.transmitted.payload_bytes);
+ EXPECT_EQ(a.rtp_stats.transmitted.header_bytes,
+ b.rtp_stats.transmitted.header_bytes);
+ EXPECT_EQ(a.rtp_stats.transmitted.padding_bytes,
+ b.rtp_stats.transmitted.padding_bytes);
+ EXPECT_EQ(a.rtp_stats.transmitted.packets,
+ b.rtp_stats.transmitted.packets);
+ EXPECT_EQ(a.rtp_stats.retransmitted.packets,
+ b.rtp_stats.retransmitted.packets);
+ EXPECT_EQ(a.rtp_stats.fec.packets, b.rtp_stats.fec.packets);
+
+ EXPECT_EQ(a.report_block_data.has_value(),
+ b.report_block_data.has_value());
+ if (a.report_block_data.has_value()) {
+ const RTCPReportBlock& a_rtcp_stats =
+ a.report_block_data->report_block();
+ const RTCPReportBlock& b_rtcp_stats =
+ b.report_block_data->report_block();
+ EXPECT_EQ(a_rtcp_stats.fraction_lost, b_rtcp_stats.fraction_lost);
+ EXPECT_EQ(a_rtcp_stats.packets_lost, b_rtcp_stats.packets_lost);
+ EXPECT_EQ(a_rtcp_stats.extended_highest_sequence_number,
+ b_rtcp_stats.extended_highest_sequence_number);
+ EXPECT_EQ(a_rtcp_stats.jitter, b_rtcp_stats.jitter);
+ }
+ }
+ }
+
+ test::ScopedKeyValueConfig override_field_trials_;
+ SimulatedClock fake_clock_;
+ std::unique_ptr<SendStatisticsProxy> statistics_proxy_;
+ VideoSendStream::Config config_;
+ VideoSendStream::Stats expected_;
+};
+
+TEST_F(SendStatisticsProxyTest, ReportBlockDataObserver) {
+ ReportBlockDataObserver* callback = statistics_proxy_.get();
+ for (uint32_t ssrc : config_.rtp.ssrcs) {
+ // Add statistics with some arbitrary, but unique, numbers.
+ uint32_t offset = ssrc * 4;
+ RTCPReportBlock report_block;
+ report_block.source_ssrc = ssrc;
+ report_block.packets_lost = offset;
+ report_block.extended_highest_sequence_number = offset + 1;
+ report_block.fraction_lost = offset + 2;
+ report_block.jitter = offset + 3;
+ ReportBlockData data;
+ data.SetReportBlock(report_block, 0);
+ expected_.substreams[ssrc].report_block_data = data;
+
+ callback->OnReportBlockDataUpdated(data);
+ }
+ for (uint32_t ssrc : config_.rtp.rtx.ssrcs) {
+ // Add statistics with some arbitrary, but unique, numbers.
+ uint32_t offset = ssrc * 4;
+ RTCPReportBlock report_block;
+ report_block.source_ssrc = ssrc;
+ report_block.packets_lost = offset;
+ report_block.extended_highest_sequence_number = offset + 1;
+ report_block.fraction_lost = offset + 2;
+ report_block.jitter = offset + 3;
+ ReportBlockData data;
+ data.SetReportBlock(report_block, 0);
+ expected_.substreams[ssrc].report_block_data = data;
+
+ callback->OnReportBlockDataUpdated(data);
+ }
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, Suspended) {
+ // Verify that the value is false by default.
+ EXPECT_FALSE(statistics_proxy_->GetStats().suspended);
+
+ // Verify that we can set it to true.
+ statistics_proxy_->OnSuspendChange(true);
+ EXPECT_TRUE(statistics_proxy_->GetStats().suspended);
+
+ // Verify that we can set it back to false again.
+ statistics_proxy_->OnSuspendChange(false);
+ EXPECT_FALSE(statistics_proxy_->GetStats().suspended);
+}
+
+TEST_F(SendStatisticsProxyTest, FrameCounts) {
+ FrameCountObserver* observer = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ // Add statistics with some arbitrary, but unique, numbers.
+ VideoSendStream::StreamStats& stats = expected_.substreams[ssrc];
+ uint32_t offset = ssrc * sizeof(VideoSendStream::StreamStats);
+ FrameCounts frame_counts;
+ frame_counts.key_frames = offset;
+ frame_counts.delta_frames = offset + 1;
+ stats.frame_counts = frame_counts;
+ observer->FrameCountUpdated(frame_counts, ssrc);
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ // Add statistics with some arbitrary, but unique, numbers.
+ VideoSendStream::StreamStats& stats = expected_.substreams[ssrc];
+ uint32_t offset = ssrc * sizeof(VideoSendStream::StreamStats);
+ FrameCounts frame_counts;
+ frame_counts.key_frames = offset;
+ frame_counts.delta_frames = offset + 1;
+ stats.frame_counts = frame_counts;
+ observer->FrameCountUpdated(frame_counts, ssrc);
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, DataCounters) {
+ StreamDataCountersCallback* callback = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ StreamDataCounters& counters = expected_.substreams[ssrc].rtp_stats;
+ // Add statistics with some arbitrary, but unique, numbers.
+ size_t offset = ssrc * sizeof(StreamDataCounters);
+ uint32_t offset_uint32 = static_cast<uint32_t>(offset);
+ counters.transmitted.payload_bytes = offset;
+ counters.transmitted.header_bytes = offset + 1;
+ counters.fec.packets = offset_uint32 + 2;
+ counters.transmitted.padding_bytes = offset + 3;
+ counters.retransmitted.packets = offset_uint32 + 4;
+ counters.transmitted.packets = offset_uint32 + 5;
+ callback->DataCountersUpdated(counters, ssrc);
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ StreamDataCounters& counters = expected_.substreams[ssrc].rtp_stats;
+ // Add statistics with some arbitrary, but unique, numbers.
+ size_t offset = ssrc * sizeof(StreamDataCounters);
+ uint32_t offset_uint32 = static_cast<uint32_t>(offset);
+ counters.transmitted.payload_bytes = offset;
+ counters.transmitted.header_bytes = offset + 1;
+ counters.fec.packets = offset_uint32 + 2;
+ counters.transmitted.padding_bytes = offset + 3;
+ counters.retransmitted.packets = offset_uint32 + 4;
+ counters.transmitted.packets = offset_uint32 + 5;
+ callback->DataCountersUpdated(counters, ssrc);
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, Bitrate) {
+ BitrateStatisticsObserver* observer = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ // Use ssrc as bitrate_bps to get a unique value for each stream.
+ uint32_t total = ssrc;
+ uint32_t retransmit = ssrc + 1;
+ observer->Notify(total, retransmit, ssrc);
+ expected_.substreams[ssrc].total_bitrate_bps = total;
+ expected_.substreams[ssrc].retransmit_bitrate_bps = retransmit;
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ // Use ssrc as bitrate_bps to get a unique value for each stream.
+ uint32_t total = ssrc;
+ uint32_t retransmit = ssrc + 1;
+ observer->Notify(total, retransmit, ssrc);
+ expected_.substreams[ssrc].total_bitrate_bps = total;
+ expected_.substreams[ssrc].retransmit_bitrate_bps = retransmit;
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, SendSideDelay) {
+ SendSideDelayObserver* observer = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ // Use ssrc as avg_delay_ms and max_delay_ms to get a unique value for each
+ // stream.
+ int avg_delay_ms = ssrc;
+ int max_delay_ms = ssrc + 1;
+ observer->SendSideDelayUpdated(avg_delay_ms, max_delay_ms, ssrc);
+ expected_.substreams[ssrc].avg_delay_ms = avg_delay_ms;
+ expected_.substreams[ssrc].max_delay_ms = max_delay_ms;
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ // Use ssrc as avg_delay_ms and max_delay_ms to get a unique value for each
+ // stream.
+ int avg_delay_ms = ssrc;
+ int max_delay_ms = ssrc + 1;
+ observer->SendSideDelayUpdated(avg_delay_ms, max_delay_ms, ssrc);
+ expected_.substreams[ssrc].avg_delay_ms = avg_delay_ms;
+ expected_.substreams[ssrc].max_delay_ms = max_delay_ms;
+ }
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, OnEncodedFrameTimeMeasured) {
+ const int kEncodeTimeMs = 11;
+ int encode_usage_percent = 80;
+ statistics_proxy_->OnEncodedFrameTimeMeasured(kEncodeTimeMs,
+ encode_usage_percent);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodeTimeMs, stats.avg_encode_time_ms);
+ EXPECT_EQ(encode_usage_percent, stats.encode_usage_percent);
+}
+
+TEST_F(SendStatisticsProxyTest, TotalEncodeTimeIncreasesPerFrameMeasured) {
+ const int kEncodeUsagePercent = 0; // Don't care for this test.
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().total_encode_time_ms);
+ statistics_proxy_->OnEncodedFrameTimeMeasured(10, kEncodeUsagePercent);
+ EXPECT_EQ(10u, statistics_proxy_->GetStats().total_encode_time_ms);
+ statistics_proxy_->OnEncodedFrameTimeMeasured(20, kEncodeUsagePercent);
+ EXPECT_EQ(30u, statistics_proxy_->GetStats().total_encode_time_ms);
+}
+
+TEST_F(SendStatisticsProxyTest, OnSendEncodedImageIncreasesFramesEncoded) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_encoded);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(i, statistics_proxy_->GetStats().frames_encoded);
+ }
+}
+
+TEST_F(SendStatisticsProxyTest, OnSendEncodedImageIncreasesQpSum) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ auto ssrc = config_.rtp.ssrcs[0];
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().substreams[ssrc].qp_sum);
+ encoded_image.qp_ = 3;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(3u, statistics_proxy_->GetStats().substreams[ssrc].qp_sum);
+ encoded_image.qp_ = 127;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(130u, statistics_proxy_->GetStats().substreams[ssrc].qp_sum);
+}
+
+TEST_F(SendStatisticsProxyTest, OnSendEncodedImageWithoutQpQpSumWontExist) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ auto ssrc = config_.rtp.ssrcs[0];
+ encoded_image.qp_ = -1;
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().substreams[ssrc].qp_sum);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().substreams[ssrc].qp_sum);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ OnSendEncodedImageSetsScalabilityModeOfCurrentLayer) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ ScalabilityMode layer0_mode = ScalabilityMode::kL1T1;
+ ScalabilityMode layer1_mode = ScalabilityMode::kL1T3;
+ auto ssrc0 = config_.rtp.ssrcs[0];
+ auto ssrc1 = config_.rtp.ssrcs[1];
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().substreams[ssrc0].scalability_mode);
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().substreams[ssrc1].scalability_mode);
+ encoded_image.SetSpatialIndex(0);
+ codec_info.scalability_mode = layer0_mode;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_THAT(statistics_proxy_->GetStats().substreams[ssrc0].scalability_mode,
+ layer0_mode);
+ EXPECT_EQ(absl::nullopt,
+ statistics_proxy_->GetStats().substreams[ssrc1].scalability_mode);
+ encoded_image.SetSpatialIndex(1);
+ codec_info.scalability_mode = layer1_mode;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_THAT(statistics_proxy_->GetStats().substreams[ssrc0].scalability_mode,
+ layer0_mode);
+ EXPECT_THAT(statistics_proxy_->GetStats().substreams[ssrc1].scalability_mode,
+ layer1_mode);
+}
+
+TEST_F(SendStatisticsProxyTest, TotalEncodedBytesTargetFirstFrame) {
+ const uint32_t kTargetBytesPerSecond = 100000;
+ statistics_proxy_->OnSetEncoderTargetRate(kTargetBytesPerSecond * 8);
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().total_encoded_bytes_target);
+
+ EncodedImage encoded_image;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+  // On the first frame we don't know the frame rate yet, so the calculation
+  // yields zero. The estimate assumes at least 1 FPS, so we expect the target
+  // total to increment by a full `kTargetBytesPerSecond`.
+ EXPECT_EQ(kTargetBytesPerSecond,
+ statistics_proxy_->GetStats().total_encoded_bytes_target);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ TotalEncodedBytesTargetIncrementsBasedOnFrameRate) {
+ const uint32_t kTargetBytesPerSecond = 100000;
+ const int kInterframeDelayMs = 100;
+
+  // SendStatisticsProxy uses a RateTracker internally. The proxy is driven by
+  // `fake_clock_` in tests, but the RateTracker relies on a global clock, so
+  // this test uses rtc::ScopedFakeClock to keep the two clocks in sync.
+ // TODO(https://crbug.com/webrtc/10640): When the RateTracker uses a Clock
+ // this test can stop relying on rtc::ScopedFakeClock.
+ rtc::ScopedFakeClock fake_global_clock;
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+
+ statistics_proxy_->OnSetEncoderTargetRate(kTargetBytesPerSecond * 8);
+ EncodedImage encoded_image;
+
+ // First frame
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ uint64_t first_total_encoded_bytes_target =
+ statistics_proxy_->GetStats().total_encoded_bytes_target;
+ // Second frame
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ 90 * kInterframeDelayMs);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+
+ auto stats = statistics_proxy_->GetStats();
+  // By the time the second frame arrives, exactly one frame has been seen in
+  // the preceding `kInterframeDelayMs` window, so the estimated encode frame
+  // rate at the second frame's arrival should be 10 FPS.
+ uint64_t delta_encoded_bytes_target =
+ stats.total_encoded_bytes_target - first_total_encoded_bytes_target;
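+  // With a 100 ms interframe delay the tracked rate is 10 fps, so the
+  // expected per-frame increment is kTargetBytesPerSecond / 10 = 10000 bytes.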
+ EXPECT_EQ(kTargetBytesPerSecond / 10, delta_encoded_bytes_target);
+}
+
+TEST_F(SendStatisticsProxyTest, EncodeFrameRateInSubStream) {
+ const int kInterframeDelayMs = 100;
+ const auto ssrc = config_.rtp.ssrcs[0];
+ rtc::ScopedFakeClock fake_global_clock;
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+
+ // First frame
+ EncodedImage encoded_image;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ // Second frame
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ 90 * kInterframeDelayMs);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+
+ auto stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(stats.substreams[ssrc].encode_frame_rate, 10);
+}
+
+TEST_F(SendStatisticsProxyTest, EncodeFrameRateInSubStreamsVp8Simulcast) {
+ const int kInterframeDelayMs = 100;
+ rtc::ScopedFakeClock fake_global_clock;
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < 10; ++i) {
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ 90 * kInterframeDelayMs);
+ encoded_image.SetSpatialIndex(0);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(2u, stats.substreams.size());
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[0]].encode_frame_rate, 10);
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[1]].encode_frame_rate, 10);
+
+ // Stop encoding second stream, expect framerate to be zero.
+ for (int i = 0; i < 10; ++i) {
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ 90 * kInterframeDelayMs);
+ encoded_image.SetSpatialIndex(0);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ }
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(2u, stats.substreams.size());
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[0]].encode_frame_rate, 10);
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[1]].encode_frame_rate, 0);
+
+  // Resume encoding the second stream.
+ for (int i = 0; i < 10; ++i) {
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ 90 * kInterframeDelayMs);
+ encoded_image.SetSpatialIndex(0);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ }
+
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(2u, stats.substreams.size());
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[0]].encode_frame_rate, 10);
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[1]].encode_frame_rate, 10);
+}
+
+TEST_F(SendStatisticsProxyTest, EncodeFrameRateInSubStreamsVp9Svc) {
+ const int kInterframeDelayMs = 100;
+ rtc::ScopedFakeClock fake_global_clock;
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+
+ for (int i = 0; i < 10; ++i) {
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ 90 * kInterframeDelayMs);
+ encoded_image.SetSpatialIndex(0);
+ codec_info.end_of_picture = false;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ codec_info.end_of_picture = true;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(1u, stats.substreams.size());
+ EXPECT_EQ(stats.substreams[config_.rtp.ssrcs[0]].encode_frame_rate, 10);
+}
+
+TEST_F(SendStatisticsProxyTest, GetCpuAdaptationStats) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps_adaptations = 1;
+ cpu_counts.resolution_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps_adaptations = 0;
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps_adaptations = 1;
+ statistics_proxy_->UpdateAdaptationSettings(kResolutionScalingDisabled,
+ kResolutionScalingDisabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+}
+
+TEST_F(SendStatisticsProxyTest, GetQualityAdaptationStats) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps_adaptations = 1;
+ quality_counts.resolution_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps_adaptations = 0;
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps_adaptations = 1;
+ statistics_proxy_->UpdateAdaptationSettings(kResolutionScalingDisabled,
+ kResolutionScalingDisabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsCpuAdaptChanges) {
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ cpu_counts.resolution_adaptations = 2;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsQualityAdaptChanges) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ quality_counts.fps_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ quality_counts.fps_adaptations = 0;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+}
+
+TEST_F(SendStatisticsProxyTest, TestAdaptationStatisticsMasking) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ quality_counts.fps_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ cpu_counts.fps_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+  // We now have 1 fps and 1 resolution reduction for both cpu and quality.
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Disable quality scaling. Expect quality scaling not limited.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled,
+ kScalingDisabled);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Disable framerate scaling.
+ statistics_proxy_->UpdateAdaptationSettings(kFramerateScalingDisabled,
+ kFramerateScalingDisabled);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Disable resolution scaling.
+ statistics_proxy_->UpdateAdaptationSettings(kResolutionScalingDisabled,
+ kResolutionScalingDisabled);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Enable all
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_AdaptationNotEnabled) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Min runtime has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_MinRuntimeNotPassed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Min runtime has not passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, ZeroAdaptChangesReported) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Min runtime has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 0));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuAdaptChangesReported) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownChange) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Adapt changes: 1 (1 initial) = 0, elapsed time: 10 sec => 0 per minute.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownChanges) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Adapt changes: 3 (2 initial) = 1, elapsed time: 10 sec => 6 per minute.
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ quality_counts.resolution_adaptations = 2;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ quality_counts.resolution_adaptations = 3;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, InitialQualityAdaptChangesNotExcludedOnError) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Adapt changes: 1 (2 initial) = 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownAndUpChanges) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->ClearAdaptationStats();
+ // Adapt changes: 8 (4 initial) = 4, elapsed time: 10 sec => 24 per minute.
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ quality_counts.resolution_adaptations = 2;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnInitialQualityResolutionAdaptDown();
+ quality_counts.resolution_adaptations = 3;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ quality_counts.fps_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ quality_counts.fps_adaptations = 0;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ quality_counts.resolution_adaptations = 2; // Initial resolution up.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ quality_counts.resolution_adaptations = 1; // Initial resolution up.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ quality_counts.resolution_adaptations = 0;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 24));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsExcludesDisabledTime) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Disable quality adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled,
+ kScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Enable quality adaptation.
+ // Adapt changes: 2, elapsed time: 20 sec.
+ quality_counts.fps_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kResolutionScalingDisabled,
+ kResolutionScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->UpdateAdaptationSettings(kResolutionScalingDisabled,
+ kResolutionScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(9000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(6000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+
+ // Disable quality adaptation.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Enable quality adaptation.
+ // Adapt changes: 1, elapsed time: 10 sec.
+ quality_counts.resolution_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kFramerateScalingDisabled,
+ kFramerateScalingDisabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Disable quality adaptation.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(20000);
+
+ // Adapt changes: 3, elapsed time: 30 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ AdaptChangesNotReported_ScalingNotEnabledVideoResumed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Suspend and resume video.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->OnSuspendChange(false);
+
+ // Min runtime has passed but scaling not enabled.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, QualityAdaptChangesStatsExcludesSuspendedTime) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Enable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ // Adapt changes: 2, elapsed time: 20 sec.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ fake_clock_.AdvanceTimeMilliseconds(20000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+
+ // Suspend and resume video.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+ statistics_proxy_->OnSuspendChange(false);
+
+ // Adapt changes: 1, elapsed time: 10 sec.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Adapt changes: 3, elapsed time: 30 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuAdaptChangesStatsExcludesSuspendedTime) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Video not suspended.
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Enable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ // Adapt changes: 1, elapsed time: 20 sec.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ // Video not suspended, stats time already started.
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Disable adaptation.
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Suspend and resume video, stats time not started when scaling not enabled.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Enable adaptation.
+ // Adapt changes: 1, elapsed time: 10 sec.
+ cpu_counts.fps_adaptations = 0;
+ cpu_counts.resolution_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ // Adapt changes: 2, elapsed time: 30 sec => 4 per minute.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 4));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsNotStartedIfVideoSuspended) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Video suspended.
+ statistics_proxy_->OnSuspendChange(true);
+
+ // Enable adaptation, stats time not started when suspended.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Resume video, stats time started.
+ // Adapt changes: 1, elapsed time: 10 sec.
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ // Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsRestartsOnFirstSentPacket) {
+ // Send first packet, adaptation enabled.
+ // Elapsed time before first packet is sent should be excluded.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ UpdateDataCounters(kFirstSsrc);
+
+ // Adapt changes: 1, elapsed time: 10 sec.
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ UpdateDataCounters(kFirstSsrc);
+
+ // Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsStartedAfterFirstSentPacket) {
+ // Enable and disable adaptation.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ fake_clock_.AdvanceTimeMilliseconds(60000);
+ statistics_proxy_->UpdateAdaptationSettings(kScalingDisabled,
+ kScalingDisabled);
+
+ // Send first packet, scaling disabled.
+ // Elapsed time before first packet is sent should be excluded.
+ UpdateDataCounters(kFirstSsrc);
+ fake_clock_.AdvanceTimeMilliseconds(60000);
+
+ // Enable adaptation.
+ cpu_counts.resolution_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kFramerateScalingDisabled,
+ kScalingDisabled);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ UpdateDataCounters(kFirstSsrc);
+
+ // Adapt changes: 1, elapsed time: 20 sec.
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ // Adapt changes: 1, elapsed time: 20 sec => 3 per minute.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 3));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesReportedAfterContentSwitch) {
+ // First RTP packet sent, cpu adaptation enabled.
+ UpdateDataCounters(kFirstSsrc);
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled,
+ kScalingDisabled);
+
+ // Adapt changes: 2, elapsed time: 15 sec => 8 per minute.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(6000);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(9000);
+
+ // Switch content type, real-time stats should be updated.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, {});
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 8));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+
+ // First RTP packet sent, scaling enabled.
+ UpdateDataCounters(kFirstSsrc);
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled,
+ kScalingDisabled);
+
+ // Adapt changes: 4, elapsed time: 120 sec => 2 per minute.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(120000);
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu", 2));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationReasonIsCpuWhenCpuIsResolutionLimited) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ EXPECT_EQ(QualityLimitationReason::kCpu,
+ statistics_proxy_->GetStats().quality_limitation_reason);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationReasonIsCpuWhenCpuIsFramerateLimited) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ cpu_counts.fps_adaptations = 1;
+
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ EXPECT_EQ(QualityLimitationReason::kCpu,
+ statistics_proxy_->GetStats().quality_limitation_reason);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationReasonIsBandwidthWhenQualityIsResolutionLimited) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ quality_counts.resolution_adaptations = 1;
+
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+
+ EXPECT_EQ(QualityLimitationReason::kBandwidth,
+ statistics_proxy_->GetStats().quality_limitation_reason);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationReasonIsBandwidthWhenQualityIsFramerateLimited) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ quality_counts.fps_adaptations = 1;
+
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+
+ EXPECT_EQ(QualityLimitationReason::kBandwidth,
+ statistics_proxy_->GetStats().quality_limitation_reason);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationReasonIsBandwidthWhenBothCpuAndQualityIsLimited) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ cpu_counts.resolution_adaptations = 1;
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+
+ // Even if the last adaptation reason is kCpu, if the counters indicate being
+ // both CPU and quality (=bandwidth) limited, kBandwidth takes precedence.
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ EXPECT_EQ(QualityLimitationReason::kBandwidth,
+ statistics_proxy_->GetStats().quality_limitation_reason);
+}
+
+TEST_F(SendStatisticsProxyTest, QualityLimitationReasonIsNoneWhenNotLimited) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ // Observe a limitation due to CPU. This makes sure the test doesn't pass
+ // due to "none" being the default value.
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ // Go back to not being limited.
+ cpu_counts.resolution_adaptations = 0;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ EXPECT_EQ(QualityLimitationReason::kNone,
+ statistics_proxy_->GetStats().quality_limitation_reason);
+}
+
+TEST_F(SendStatisticsProxyTest, QualityLimitationDurationIncreasesWithTime) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ // Not limited for 3000 ms
+ fake_clock_.AdvanceTimeMilliseconds(3000);
+ // CPU limited for 2000 ms
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(2000);
+ // Bandwidth limited for 1000 ms
+ cpu_counts.resolution_adaptations = 0;
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ // CPU limited for another 2000 ms
+ cpu_counts.resolution_adaptations = 1;
+ quality_counts.resolution_adaptations = 0;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(2000);
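+  // Expected totals: kNone = 3000 ms, kCpu = 2000 + 2000 = 4000 ms,
+  // kBandwidth = 1000 ms and kOther = 0 ms.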
+
+ auto quality_limitation_durations_ms =
+ statistics_proxy_->GetStats().quality_limitation_durations_ms;
+
+ EXPECT_EQ(3000,
+ quality_limitation_durations_ms[QualityLimitationReason::kNone]);
+ EXPECT_EQ(4000,
+ quality_limitation_durations_ms[QualityLimitationReason::kCpu]);
+ EXPECT_EQ(
+ 1000,
+ quality_limitation_durations_ms[QualityLimitationReason::kBandwidth]);
+ EXPECT_EQ(0,
+ quality_limitation_durations_ms[QualityLimitationReason::kOther]);
+}
+
+TEST_F(SendStatisticsProxyTest, QualityLimitationResolutionChangesDefaultZero) {
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionChangesNotChangesWithOnlyDefaultAllocation) {
+ VideoCodec codec;
+ VideoBitrateAllocation allocation;
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionChangesDoesNotIncreaseOnFirstAllocation) {
+ VideoCodec codec;
+ codec.simulcastStream[0].active = true;
+ codec.simulcastStream[1].active = true;
+ codec.simulcastStream[2].active = true;
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionChangesWhenNewLayerGetsBandwidth) {
+ VideoCodec codec;
+ codec.simulcastStream[0].active = true;
+ codec.simulcastStream[1].active = true;
+ codec.simulcastStream[2].active = true;
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ allocation.SetBitrate(1, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 1u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionDoesNotChangeWhenLayerSame) {
+ VideoCodec codec;
+ codec.simulcastStream[0].active = true;
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+  // Layer 0 got more bandwidth, but there is still only one active layer.
+ allocation.SetBitrate(0, 0, 200);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionChangesWithTogglingLayers) {
+ VideoCodec codec;
+ codec.simulcastStream[0].active = true;
+ codec.simulcastStream[1].active = true;
+ codec.simulcastStream[2].active = true;
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+ allocation.SetBitrate(1, 0, 300);
+ allocation.SetBitrate(2, 0, 500);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 1u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+ // Layer 2 off
+ allocation.SetBitrate(2, 0, 0);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 2u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+ // Layer 2 back on
+ allocation.SetBitrate(2, 0, 500);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 3u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+ allocation.SetBitrate(0, 0, 0);
+ allocation.SetBitrate(1, 0, 0);
+ allocation.SetBitrate(2, 0, 0);
+ // All layers off
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 4u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionDoesNotUpdateOnCodecSimulcastStreamChanges) {
+ VideoCodec codec;
+ // 3 layers
+ codec.simulcastStream[0].active = true;
+ codec.simulcastStream[1].active = true;
+ codec.simulcastStream[2].active = true;
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 500);
+ allocation.SetBitrate(1, 0, 500);
+ allocation.SetBitrate(2, 0, 500);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+
+ // Down to one layer now, triggered by a config change
+ codec.numberOfSimulcastStreams = 1;
+ codec.simulcastStream[1].active = false;
+ codec.simulcastStream[2].active = false;
+ allocation.SetBitrate(0, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+
+ // Up to 3 layers again.
+ codec.numberOfSimulcastStreams = 3;
+ codec.simulcastStream[1].active = true;
+ codec.simulcastStream[2].active = true;
+ allocation.SetBitrate(0, 0, 500);
+ allocation.SetBitrate(1, 0, 500);
+ allocation.SetBitrate(2, 0, 500);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationResolutionDoesNotUpdateForSpatialLayerChanges) {
+ VideoCodec codec;
+ codec.simulcastStream[0].active = true;
+ codec.spatialLayers[0].active = true;
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[2].active = true;
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 500);
+ allocation.SetBitrate(1, 0, 500);
+ allocation.SetBitrate(2, 0, 500);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+
+ // Down to one layer now, triggered by a config change
+ codec.spatialLayers[1].active = false;
+ codec.spatialLayers[2].active = false;
+ allocation.SetBitrate(0, 0, 100);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+
+ // Up to 3 layers again.
+ codec.spatialLayers[1].active = true;
+ codec.spatialLayers[2].active = true;
+ allocation.SetBitrate(0, 0, 500);
+ allocation.SetBitrate(1, 0, 500);
+ allocation.SetBitrate(2, 0, 500);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_EQ(
+ 0u, statistics_proxy_->GetStats().quality_limitation_resolution_changes);
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitationReasonsAreCorrectForContentType) {
+ // Realtime case.
+ // Configure two streams.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ config.number_of_streams = 2;
+ VideoStream stream1;
+ stream1.width = kWidth / 2;
+ stream1.height = kHeight / 2;
+ VideoStream stream2;
+ stream2.width = kWidth;
+ stream2.height = kHeight;
+ statistics_proxy_->OnEncoderReconfigured(config, {stream1, stream2});
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(statistics_proxy_->GetStats().quality_limitation_reason,
+ QualityLimitationReason::kNone);
+ // Bw disabled one layer.
+ VideoCodec codec;
+ codec.numberOfSimulcastStreams = 2;
+ codec.simulcastStream[0].active = true;
+ codec.simulcastStream[1].active = true;
+ VideoBitrateAllocation allocation;
+ // Some positive bitrate only on the first stream.
+ allocation.SetBitrate(0, 0, 10000);
+ allocation.SetBitrate(1, 0, 0);
+ allocation.set_bw_limited(true);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(statistics_proxy_->GetStats().quality_limitation_reason,
+ QualityLimitationReason::kBandwidth);
+ // Bw enabled all layers.
+ allocation.SetBitrate(1, 0, 10000);
+ allocation.set_bw_limited(false);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(statistics_proxy_->GetStats().quality_limitation_reason,
+ QualityLimitationReason::kNone);
+
+  // Screencast case.
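+  // For screen content, a bandwidth-disabled layer is reported as a framerate
+  // limitation rather than a resolution limitation.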
+ // Configure two streams.
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ config.number_of_streams = 2;
+ stream1.width = kWidth;
+ stream1.height = kHeight;
+ statistics_proxy_->OnEncoderReconfigured(config, {stream1, stream2});
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(statistics_proxy_->GetStats().quality_limitation_reason,
+ QualityLimitationReason::kNone);
+ // Bw disabled one layer.
+  // Some positive bitrate only on the first stream.
+ allocation.SetBitrate(0, 0, 10000);
+ allocation.SetBitrate(1, 0, 0);
+ allocation.set_bw_limited(true);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(statistics_proxy_->GetStats().quality_limitation_reason,
+ QualityLimitationReason::kBandwidth);
+ // Bw enabled all layers.
+ allocation.SetBitrate(1, 0, 10000);
+ allocation.set_bw_limited(false);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+  EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(statistics_proxy_->GetStats().quality_limitation_reason,
+ QualityLimitationReason::kNone);
+}
+
+TEST_F(SendStatisticsProxyTest, SwitchContentTypeUpdatesHistograms) {
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ // No switch, stats should not be updated.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ statistics_proxy_->OnEncoderReconfigured(config, {});
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+
+ // Switch to screenshare, real-time stats should be updated.
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, {});
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+}
+
+TEST_F(SendStatisticsProxyTest, InputResolutionHistogramsAreUpdated) {
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputWidthInPixels", kWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputHeightInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputHeightInPixels", kHeight));
+}
+
+TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
+ const int64_t kMaxEncodedFrameWindowMs = 800;
+ const int kFps = 5;
+ const int kNumFramesPerWindow = kFps * kMaxEncodedFrameWindowMs / 1000;
+ const int kMinSamples = // Sample added when removed from EncodedFrameMap.
+ SendStatisticsProxy::kMinRequiredMetricsSamples + kNumFramesPerWindow;
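+  // A frame only contributes a sample once it is dropped from the encoded
+  // frame window, so kNumFramesPerWindow extra frames are fed to flush the
+  // last samples out before the histograms are checked.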
+ EncodedImage encoded_image;
+
+ // Not enough samples, stats should not be updated.
+ for (int i = 0; i < kMinSamples - 1; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(encoded_image.Timestamp() + 90 * 1000 / kFps);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+  SetUp(); // Resetting the stats proxy also causes histograms to be reported.
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
+
+ // Enough samples, max resolution per frame should be reported.
+ encoded_image.SetTimestamp(0xffff0000); // Will wrap.
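+  // RTP timestamps are 32-bit and wrap modulo 2^32; starting near the maximum
+  // verifies that the per-frame bookkeeping survives the wraparound.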
+ for (int i = 0; i < kMinSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(encoded_image.Timestamp() + 90 * 1000 / kFps);
+ encoded_image._encodedWidth = kWidth;
+ encoded_image._encodedHeight = kHeight;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ encoded_image._encodedWidth = kWidth / 2;
+ encoded_image._encodedHeight = kHeight / 2;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentWidthInPixels", kWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentHeightInPixels", kHeight));
+}
+
+TEST_F(SendStatisticsProxyTest, InputFpsHistogramIsUpdated) {
+ const int kFps = 20;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
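+  // Enough frames to span kMinPeriodicSamples periodic FPS measurement
+  // intervals of kFpsPeriodicIntervalMs each.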
+ for (int i = 0; i <= frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, SentFpsHistogramIsUpdated) {
+ EncodedImage encoded_image;
+ const int kFps = 20;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000 + 1;
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(encoded_image.Timestamp() + 1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ // Frame with same timestamp should not be counted.
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, InputFpsHistogramExcludesSuspendedTime) {
+ const int kFps = 20;
+ const int kSuspendTimeMs = 10000;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+ }
+ // Suspend.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(kSuspendTimeMs);
+
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+ }
+ // Suspended time interval should not affect the framerate.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, SentFpsHistogramExcludesSuspendedTime) {
+ EncodedImage encoded_image;
+ const int kFps = 20;
+ const int kSuspendTimeMs = 10000;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(i + 1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ // Suspend.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(kSuspendTimeMs);
+
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(i + 1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ // Suspended time interval should not affect the framerate.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramNotUpdatedWhenDisabled) {
+ statistics_proxy_->UpdateAdaptationSettings(kResolutionScalingDisabled,
+ kResolutionScalingDisabled);
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramUpdated) {
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ cpu_counts.resolution_adaptations = 0;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ cpu_counts.resolution_adaptations = 1;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kCpu,
+ cpu_counts, quality_counts);
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.CpuLimitedResolutionInPercent", 50));
+}
+
+TEST_F(SendStatisticsProxyTest, LifetimeHistogramIsUpdated) {
+ const int64_t kTimeSec = 3;
+ fake_clock_.AdvanceTimeMilliseconds(kTimeSec * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents("WebRTC.Video.SendStreamLifetimeInSeconds", kTimeSec));
+}
+
+TEST_F(SendStatisticsProxyTest, CodecTypeHistogramIsUpdated) {
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoder.CodecType"));
+}
+
+TEST_F(SendStatisticsProxyTest, PauseEventHistogramIsUpdated) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Min runtime has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ PauseEventHistogramIsNotUpdatedIfMinRuntimeHasNotPassed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Min runtime has not passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ PauseEventHistogramIsNotUpdatedIfNoMediaIsSent) {
+ // First RTP packet not sent.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+}
+
+TEST_F(SendStatisticsProxyTest, NoPauseEvent) {
+ // First RTP packet sent and min runtime passed.
+ UpdateDataCounters(kFirstSsrc);
+
+ // No change. Video: 10000 ms, paused: 0 ms (0%).
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, OnePauseEvent) {
+ // First RTP packet sent and min runtime passed.
+ UpdateDataCounters(kFirstSsrc);
+
+ // One change. Video: 7000 ms, paused: 3000 ms (30%).
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(7000);
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(3000);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 1));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 30));
+}
+
+TEST_F(SendStatisticsProxyTest, TwoPauseEvents) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Two changes. Video: 19000 ms, paused: 1000 ms (5%).
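+  // Note: the back-to-back OnSetEncoderTargetRate(0) calls below register a
+  // single pause event, not two.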
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ statistics_proxy_->OnSetEncoderTargetRate(50000); // Starts on bitrate > 0.
+ fake_clock_.AdvanceTimeMilliseconds(7000);
+ statistics_proxy_->OnSetEncoderTargetRate(60000);
+ fake_clock_.AdvanceTimeMilliseconds(3000);
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(250);
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(750);
+ statistics_proxy_->OnSetEncoderTargetRate(60000);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(4000);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 2));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 5));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ PausedTimeHistogramIsNotUpdatedIfMinRuntimeHasNotPassed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+
+ // Min runtime has not passed.
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.SetSpatialIndex(0);
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ encoded_image.qp_ = kQpIdx1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S0", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S1", kQpIdx1));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
+ test::ScopedKeyValueConfig field_trials;
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials));
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.SetSpatialIndex(0);
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8", kQpIdx0));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 2;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.qp_ = kQpIdx0;
+ encoded_image.SetSpatialIndex(0);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.qp_ = kQpIdx1;
+ encoded_image.SetSpatialIndex(1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S0", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S1", kQpIdx1));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9OneSpatialLayer) {
+ test::ScopedKeyValueConfig field_trials;
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials));
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 1;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9", kQpIdx0));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_H264) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecH264;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.SetSpatialIndex(0);
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ encoded_image.qp_ = kQpIdx1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264.S0", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264.S1", kQpIdx1));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ BandwidthLimitedHistogramsNotUpdatedForOneStream) {
+ // Configure one stream.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoStream stream1;
+ stream1.width = kWidth;
+ stream1.height = kHeight;
+ statistics_proxy_->OnEncoderReconfigured(config, {stream1});
+
+ const int64_t kMaxEncodedFrameWindowMs = 800;
+ const int kFps = 20;
+ const int kNumFramesPerWindow = kFps * kMaxEncodedFrameWindowMs / 1000;
+ const int kMinSamples = // Sample added when removed from EncodedFrameMap.
+ SendStatisticsProxy::kMinRequiredMetricsSamples + kNumFramesPerWindow;
+
+ // Stream encoded.
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kWidth;
+ encoded_image._encodedHeight = kHeight;
+ for (int i = 0; i < kMinSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ (kRtpClockRateHz / kFps));
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ BandwidthLimitedHistogramsUpdatedForTwoStreams_NoResolutionDisabled) {
+ // Configure two streams.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoStream stream1;
+ stream1.width = kWidth / 2;
+ stream1.height = kHeight / 2;
+ VideoStream stream2;
+ stream2.width = kWidth;
+ stream2.height = kHeight;
+ statistics_proxy_->OnEncoderReconfigured(config, {stream1, stream2});
+
+ const int64_t kMaxEncodedFrameWindowMs = 800;
+ const int kFps = 20;
+ const int kNumFramesPerWindow = kFps * kMaxEncodedFrameWindowMs / 1000;
+ const int kMinSamples = // Sample added when removed from EncodedFrameMap.
+ SendStatisticsProxy::kMinRequiredMetricsSamples + kNumFramesPerWindow;
+
+ // Two streams encoded.
+ EncodedImage encoded_image;
+ for (int i = 0; i < kMinSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ (kRtpClockRateHz / kFps));
+ encoded_image._encodedWidth = kWidth;
+ encoded_image._encodedHeight = kHeight;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ encoded_image._encodedWidth = kWidth / 2;
+ encoded_image._encodedHeight = kHeight / 2;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionInPercent",
+ 0));
+ // No resolution disabled.
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ BandwidthLimitedHistogramsUpdatedForTwoStreams_OneResolutionDisabled) {
+ // Configure two streams.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoStream stream1;
+ stream1.width = kWidth / 2;
+ stream1.height = kHeight / 2;
+ VideoStream stream2;
+ stream2.width = kWidth;
+ stream2.height = kHeight;
+ statistics_proxy_->OnEncoderReconfigured(config, {stream1, stream2});
+
+ const int64_t kMaxEncodedFrameWindowMs = 800;
+ const int kFps = 20;
+ const int kNumFramesPerWindow = kFps * kMaxEncodedFrameWindowMs / 1000;
+ const int kMinSamples = // Sample added when removed from EncodedFrameMap.
+ SendStatisticsProxy::kMinRequiredMetricsSamples + kNumFramesPerWindow;
+
+ // One stream encoded.
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kWidth / 2;
+ encoded_image._encodedHeight = kHeight / 2;
+ for (int i = 0; i < kMinSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image.SetTimestamp(encoded_image.Timestamp() +
+ (kRtpClockRateHz / kFps));
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionInPercent",
+ 100));
+ // One resolution disabled.
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionsDisabled",
+ 1));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitedHistogramsNotUpdatedWhenDisabled) {
+ statistics_proxy_->UpdateAdaptationSettings(kFramerateScalingDisabled,
+ kScalingDisabled);
+ EncodedImage encoded_image;
+ encoded_image.SetSpatialIndex(0);
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitedHistogramsUpdatedWhenEnabled_NoResolutionDownscale) {
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ EncodedImage encoded_image;
+ encoded_image.SetSpatialIndex(0);
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.QualityLimitedResolutionInPercent", 0));
+ // No resolution downscale.
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitedHistogramsUpdatedWhenEnabled_TwoResolutionDownscales) {
+ const int kDownscales = 2;
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ quality_counts.resolution_adaptations = kDownscales;
+ statistics_proxy_->UpdateAdaptationSettings(kScalingEnabled, kScalingEnabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ EncodedImage encoded_image;
+ encoded_image.SetSpatialIndex(0);
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.QualityLimitedResolutionInPercent",
+ 100));
+ // Resolution downscales.
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.QualityLimitedResolutionDownscales",
+ kDownscales));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsBandwidthLimitedResolution) {
+ // Initially false.
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Configure two streams.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ config.number_of_streams = 2;
+ VideoStream stream1;
+ stream1.width = kWidth / 2;
+ stream1.height = kHeight / 2;
+ VideoStream stream2;
+ stream2.width = kWidth;
+ stream2.height = kHeight;
+ statistics_proxy_->OnEncoderReconfigured(config, {stream1, stream2});
+
+ // One stream encoded.
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kWidth / 2;
+ encoded_image._encodedHeight = kHeight / 2;
+
+ // Resolution scaled due to quality.
+ VideoAdaptationCounters cpu_counts;
+ VideoAdaptationCounters quality_counts;
+ quality_counts.resolution_adaptations = 1;
+ statistics_proxy_->UpdateAdaptationSettings(kFramerateScalingDisabled,
+ kFramerateScalingDisabled);
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Adapt up.
+ quality_counts.resolution_adaptations = 0;
+ statistics_proxy_->OnAdaptationChanged(VideoAdaptationReason::kQuality,
+ cpu_counts, quality_counts);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Bw disabled one layer.
+ VideoCodec codec;
+ codec.numberOfSimulcastStreams = 2;
+ codec.simulcastStream[0].active = true;
+ codec.simulcastStream[1].active = true;
+ VideoBitrateAllocation allocation;
+ // Some positive bitrate only on the second stream.
+ allocation.SetBitrate(1, 0, 10000);
+ allocation.set_bw_limited(true);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Revert for the next test.
+ allocation.set_bw_limited(false);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Internal encoder scaler reduced resolution.
+ statistics_proxy_->OnEncoderInternalScalerUpdate(/*scaled=*/true);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsTargetMediaBitrate) {
+ // Initially zero.
+ EXPECT_EQ(0, statistics_proxy_->GetStats().target_media_bitrate_bps);
+
+ const int kBitrate = 100000;
+ statistics_proxy_->OnSetEncoderTargetRate(kBitrate);
+ EXPECT_EQ(kBitrate, statistics_proxy_->GetStats().target_media_bitrate_bps);
+
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().target_media_bitrate_bps);
+}
+
+TEST_F(SendStatisticsProxyTest, NoSubstreams) {
+ uint32_t excluded_ssrc =
+ std::max(*absl::c_max_element(config_.rtp.ssrcs),
+ *absl::c_max_element(config_.rtp.rtx.ssrcs)) +
+ 1;
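+  // excluded_ssrc is guaranteed not to be part of this stream's config, so
+  // callbacks reporting on it must not create any substream stats.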
+ // From ReportBlockDataObserver.
+ ReportBlockDataObserver* rtcp_callback = statistics_proxy_.get();
+ RTCPReportBlock report_block;
+ report_block.source_ssrc = excluded_ssrc;
+ ReportBlockData data;
+ data.SetReportBlock(report_block, 0);
+ rtcp_callback->OnReportBlockDataUpdated(data);
+
+ // From BitrateStatisticsObserver.
+ uint32_t total = 0;
+ uint32_t retransmit = 0;
+ BitrateStatisticsObserver* bitrate_observer = statistics_proxy_.get();
+ bitrate_observer->Notify(total, retransmit, excluded_ssrc);
+
+ // From FrameCountObserver.
+ FrameCountObserver* fps_observer = statistics_proxy_.get();
+ FrameCounts frame_counts;
+ frame_counts.key_frames = 1;
+ fps_observer->FrameCountUpdated(frame_counts, excluded_ssrc);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_TRUE(stats.substreams.empty());
+}
+
+TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) {
+ static const int kEncodedWidth = 123;
+ static const int kEncodedHeight = 81;
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kEncodedWidth;
+ encoded_image._encodedHeight = kEncodedHeight;
+ encoded_image.SetSpatialIndex(0);
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[1]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[1]].height);
+
+  // Forward almost to the timeout; this should not have removed stats.
+ fake_clock_.AdvanceTimeMilliseconds(SendStatisticsProxy::kStatsTimeoutMs - 1);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+
+ // Update the first SSRC with bogus RTCP stats to make sure that encoded
+ // resolution still times out (no global timeout for all stats).
+ ReportBlockDataObserver* rtcp_callback = statistics_proxy_.get();
+ RTCPReportBlock report_block;
+ report_block.source_ssrc = config_.rtp.ssrcs[0];
+ ReportBlockData data;
+ data.SetReportBlock(report_block, 0);
+ rtcp_callback->OnReportBlockDataUpdated(data);
+
+ // Report stats for second SSRC to make sure it's not outdated along with the
+ // first SSRC.
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+
+  // Forward 1 ms to reach the timeout; substream 0 should no longer report a
+  // resolution, but substream 1 should.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[0]].height);
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[1]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[1]].height);
+}
+
+TEST_F(SendStatisticsProxyTest, ClearsResolutionFromInactiveSsrcs) {
+ static const int kEncodedWidth = 123;
+ static const int kEncodedHeight = 81;
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kEncodedWidth;
+ encoded_image._encodedHeight = kEncodedHeight;
+ encoded_image.SetSpatialIndex(0);
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.SetSpatialIndex(1);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+
+ statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].width);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].height);
+}
+
+TEST_F(SendStatisticsProxyTest, ClearsBitratesFromInactiveSsrcs) {
+ uint32_t bitrate = 42;
+ BitrateStatisticsObserver* observer = statistics_proxy_.get();
+ observer->Notify(bitrate, bitrate, config_.rtp.ssrcs[0]);
+ observer->Notify(bitrate, bitrate, config_.rtp.ssrcs[1]);
+
+ statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(static_cast<int>(bitrate),
+ stats.substreams[config_.rtp.ssrcs[0]].total_bitrate_bps);
+ EXPECT_EQ(static_cast<int>(bitrate),
+ stats.substreams[config_.rtp.ssrcs[0]].retransmit_bitrate_bps);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].total_bitrate_bps);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].retransmit_bitrate_bps);
+}
+
+TEST_F(SendStatisticsProxyTest, ResetsRtcpCountersOnContentChange) {
+ RtcpPacketTypeCounterObserver* proxy =
+ static_cast<RtcpPacketTypeCounterObserver*>(statistics_proxy_.get());
+ RtcpPacketTypeCounter counters;
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+ fake_clock_.AdvanceTimeMilliseconds(1000 * metrics::kMinRunTimeInSeconds);
+
+ counters.nack_packets += 1 * metrics::kMinRunTimeInSeconds;
+ counters.fir_packets += 2 * metrics::kMinRunTimeInSeconds;
+ counters.pli_packets += 3 * metrics::kMinRunTimeInSeconds;
+ counters.unique_nack_requests += 4 * metrics::kMinRunTimeInSeconds;
+ counters.nack_requests += 5 * metrics::kMinRunTimeInSeconds;
+
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+ // Changing content type causes histograms to be reported.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, {});
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.NackPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.FirPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PliPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
+
+ const int kRate = 60 * 2; // Packets per minute with two streams.
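+  // E.g. NACK: 1 packet/sec per stream * 60 sec/min * 2 streams = 1 * kRate.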
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.NackPacketsReceivedPerMinute",
+ 1 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.FirPacketsReceivedPerMinute",
+ 2 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PliPacketsReceivedPerMinute",
+ 3 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.UniqueNackRequestsReceivedInPercent",
+ 4 * 100 / 5));
+
+ // New start time but same counter values.
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+ fake_clock_.AdvanceTimeMilliseconds(1000 * metrics::kMinRunTimeInSeconds);
+
+ counters.nack_packets += 1 * metrics::kMinRunTimeInSeconds;
+ counters.fir_packets += 2 * metrics::kMinRunTimeInSeconds;
+ counters.pli_packets += 3 * metrics::kMinRunTimeInSeconds;
+ counters.unique_nack_requests += 4 * metrics::kMinRunTimeInSeconds;
+ counters.nack_requests += 5 * metrics::kMinRunTimeInSeconds;
+
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+  SetUp(); // Resetting the stats proxy also causes histograms to be reported.
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent"));
+
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents(
+ "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute", 1 * kRate));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents("WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute",
+ 2 * kRate));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents("WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute",
+ 3 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent",
+ 4 * 100 / 5));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsIsRtx) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kFirstRtxSsrc);
+
+ EXPECT_NE(GetStreamStats(kFirstSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kRtx);
+ EXPECT_EQ(GetStreamStats(kFirstSsrc).referenced_media_ssrc, absl::nullopt);
+ EXPECT_EQ(GetStreamStats(kFirstRtxSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kRtx);
+ EXPECT_EQ(GetStreamStats(kFirstRtxSsrc).referenced_media_ssrc, kFirstSsrc);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsIsFlexFec) {
+ test::ScopedKeyValueConfig field_trials;
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, GetTestConfigWithFlexFec(),
+ VideoEncoderConfig::ContentType::kRealtimeVideo, field_trials));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kFlexFecSsrc);
+
+ EXPECT_NE(GetStreamStats(kFirstSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kFlexfec);
+ EXPECT_EQ(GetStreamStats(kFirstSsrc).referenced_media_ssrc, absl::nullopt);
+ EXPECT_EQ(GetStreamStats(kFlexFecSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kFlexfec);
+ EXPECT_EQ(GetStreamStats(kFlexFecSsrc).referenced_media_ssrc, kFirstSsrc);
+}
+
+TEST_F(SendStatisticsProxyTest, SendBitratesAreReportedWithFlexFecEnabled) {
+ test::ScopedKeyValueConfig field_trials;
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, GetTestConfigWithFlexFec(),
+ VideoEncoderConfig::ContentType::kRealtimeVideo, field_trials));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.padding_bytes += 1000;
+ counters.transmitted.payload_bytes += 2000;
+ counters.retransmitted.packets += 2;
+ counters.retransmitted.header_bytes += 25;
+ counters.retransmitted.padding_bytes += 100;
+ counters.retransmitted.payload_bytes += 250;
+ counters.fec = counters.retransmitted;
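+    // Since fec mirrors retransmitted, the FEC and retransmitted bitrates
+    // computed below come out identical (3 kbps each).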
+ rtx_counters.transmitted = counters.transmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kSecondSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kFirstRtxSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kSecondRtxSsrc);
+ proxy->DataCountersUpdated(counters, kFlexFecSsrc);
+ }
+
+ statistics_proxy_.reset();
+ // Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
+ // Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
+  // Interval: (2000 - 2 * 250) bytes * 2 / 2 sec = 1500 bytes / sec = 12 kbps
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
+ // Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.RetransmittedBitrateSentInKbps", 3));
+}
+
+TEST_F(SendStatisticsProxyTest, ResetsRtpCountersOnContentChange) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+ counters.first_packet_time_ms = fake_clock_.TimeInMilliseconds();
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.padding_bytes += 1000;
+ counters.transmitted.payload_bytes += 2000;
+ counters.retransmitted.packets += 2;
+ counters.retransmitted.header_bytes += 25;
+ counters.retransmitted.padding_bytes += 100;
+ counters.retransmitted.payload_bytes += 250;
+ counters.fec = counters.retransmitted;
+ rtx_counters.transmitted = counters.transmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kSecondSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kFirstRtxSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kSecondRtxSsrc);
+ }
+
+ // Changing content type causes histograms to be reported.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, {});
+
+ // Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
+ // Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
+  // Interval: (2000 - 2 * 250) bytes * 2 / 2 sec = 1500 bytes / sec = 12 kbps
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
+ // Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.RetransmittedBitrateSentInKbps", 3));
+
+  // New metric counters but the same data counters.
+  // Running the same increments again doubles the totals but yields the same
+  // rates as before, now reported under the Screenshare histogram names.
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.padding_bytes += 1000;
+ counters.transmitted.payload_bytes += 2000;
+ counters.retransmitted.packets += 2;
+ counters.retransmitted.header_bytes += 25;
+ counters.retransmitted.padding_bytes += 100;
+ counters.retransmitted.payload_bytes += 250;
+ counters.fec = counters.retransmitted;
+ rtx_counters.transmitted = counters.transmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kSecondSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kFirstRtxSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kSecondRtxSsrc);
+ }
+
+  // Resetting the stats proxy also causes histograms to be reported.
+ statistics_proxy_.reset();
+
+ // Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.BitrateSentInKbps", 56));
+ // Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.RtxBitrateSentInKbps", 28));
+  // Interval: (2000 - 2 * 250) bytes * 2 / 2 sec = 1500 bytes / sec = 12 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.MediaBitrateSentInKbps",
+ 12));
+ // Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.PaddingBitrateSentInKbps",
+ 16));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.FecBitrateSentInKbps", 3));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps", 3));
+}
+
+TEST_F(SendStatisticsProxyTest, RtxBitrateIsZeroWhenEnabledAndNoRtxDataIsSent) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ counters.fec = counters.retransmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // RTX enabled. No data sent over RTX.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, RtxBitrateNotReportedWhenNotEnabled) {
+ test::ScopedKeyValueConfig field_trials;
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc); // RTX not configured.
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ counters.fec = counters.retransmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // RTX not enabled.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+}
+
+TEST_F(SendStatisticsProxyTest, FecBitrateIsZeroWhenEnabledAndNoFecDataIsSent) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // FEC enabled. No FEC data sent.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, FecBitrateNotReportedWhenNotEnabled) {
+ test::ScopedKeyValueConfig field_trials;
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc); // FEC not configured.
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ counters.fec = counters.retransmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // FEC not enabled.
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsEncoderImplementationName) {
+ const std::string kName = "encoderName";
+ statistics_proxy_->OnEncoderImplementationChanged(EncoderImplementation{
+ .name = kName,
+ .is_hardware_accelerated = true,
+ });
+ EXPECT_EQ(kName, statistics_proxy_->GetStats().encoder_implementation_name);
+ EXPECT_THAT(statistics_proxy_->GetStats().power_efficient_encoder,
+ ::testing::IsTrue());
+}
+
+TEST_F(SendStatisticsProxyTest, Vp9SvcLowSpatialLayerDoesNotUpdateResolution) {
+ static const int kEncodedWidth = 123;
+ static const int kEncodedHeight = 81;
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kEncodedWidth;
+ encoded_image._encodedHeight = kEncodedHeight;
+ encoded_image.SetSpatialIndex(0);
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+
+  // For the first picture, even the low spatial layer is expected to update
+  // the resolution.
+ codec_info.end_of_picture = false;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+
+ // Top layer updates resolution.
+ encoded_image._encodedWidth = kEncodedWidth * 2;
+ encoded_image._encodedHeight = kEncodedHeight * 2;
+ codec_info.end_of_picture = true;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth * 2, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight * 2, stats.substreams[config_.rtp.ssrcs[0]].height);
+
+ // Low layer of next frame doesn't update resolution.
+ encoded_image._encodedWidth = kEncodedWidth;
+ encoded_image._encodedHeight = kEncodedHeight;
+ codec_info.end_of_picture = false;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth * 2, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight * 2, stats.substreams[config_.rtp.ssrcs[0]].height);
+}
+
+class ForcedFallbackTest : public SendStatisticsProxyTest {
+ public:
+ explicit ForcedFallbackTest(const std::string& field_trials)
+ : SendStatisticsProxyTest(field_trials) {
+ codec_info_.codecType = kVideoCodecVP8;
+ codec_info_.codecSpecific.VP8.temporalIdx = 0;
+ encoded_image_._encodedWidth = kWidth;
+ encoded_image_._encodedHeight = kHeight;
+ encoded_image_.SetSpatialIndex(0);
+ }
+
+ ~ForcedFallbackTest() override {}
+
+ protected:
+ void InsertEncodedFrames(int num_frames, int interval_ms) {
+ statistics_proxy_->OnEncoderImplementationChanged(
+ {.name = codec_name_, .is_hardware_accelerated = false});
+
+    // The first frame does not update stats; insert an initial frame.
+ if (statistics_proxy_->GetStats().frames_encoded == 0) {
+ statistics_proxy_->OnSendEncodedImage(encoded_image_, &codec_info_);
+ }
+ for (int i = 0; i < num_frames; ++i) {
+ statistics_proxy_->OnSendEncodedImage(encoded_image_, &codec_info_);
+ fake_clock_.AdvanceTimeMilliseconds(interval_ms);
+ }
+ // Add frame to include last time interval.
+ statistics_proxy_->OnSendEncodedImage(encoded_image_, &codec_info_);
+ }
+
+ EncodedImage encoded_image_;
+ CodecSpecificInfo codec_info_;
+ std::string codec_name_;
+ const std::string kPrefix = "WebRTC.Video.Encoder.ForcedSw";
+ const int kFrameIntervalMs = 1000;
+ const int kMinFrames = 20; // Min run time 20 sec.
+};
+
+class ForcedFallbackDisabled : public ForcedFallbackTest {
+ public:
+ ForcedFallbackDisabled()
+ : ForcedFallbackTest("WebRTC-VP8-Forced-Fallback-Encoder-v2/Disabled-1," +
+ std::to_string(kWidth * kHeight) + ",3/") {}
+};
+
+class ForcedFallbackEnabled : public ForcedFallbackTest {
+ public:
+ ForcedFallbackEnabled()
+ : ForcedFallbackTest("WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled-1," +
+ std::to_string(kWidth * kHeight) + ",3/") {}
+};
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedIfMinRunTimeHasNotPassed) {
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs - 1);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsUpdated) {
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 0));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 0));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedIfNotVp8) {
+ codec_info_.codecType = kVideoCodecVP9;
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedForTemporalLayers) {
+ codec_info_.codecSpecific.VP8.temporalIdx = 1;
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedForSimulcast) {
+ encoded_image_.SetSpatialIndex(1);
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackDisabled, StatsNotUpdatedIfNoFieldTrial) {
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackDisabled, EnteredLowResolutionSetIfAtMaxPixels) {
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackEnabled, EnteredLowResolutionNotSetIfNotLibvpx) {
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackEnabled, EnteredLowResolutionSetIfLibvpx) {
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackDisabled, EnteredLowResolutionNotSetIfAboveMaxPixels) {
+ encoded_image_._encodedWidth = kWidth + 1;
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackDisabled, EnteredLowResolutionNotSetIfLibvpx) {
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackDisabled,
+ EnteredLowResolutionSetIfOnMinPixelLimitReached) {
+ encoded_image_._encodedWidth = kWidth + 1;
+ statistics_proxy_->OnMinPixelLimitReached();
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackEnabled, OneFallbackEvent) {
+ // One change. Video: 20000 ms, fallback: 5000 ms (25%).
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ InsertEncodedFrames(15, 1000);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(5, 1000);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
+}
+
+TEST_F(ForcedFallbackEnabled, ThreeFallbackEvents) {
+  // Stats should still be updated when no temporal index is set.
+  codec_info_.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;
+ const int kMaxFrameDiffMs = 2000;
+
+ // Three changes. Video: 60000 ms, fallback: 15000 ms (25%).
+ InsertEncodedFrames(10, 1000);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(15, 500);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_name_ = "notlibvpx";
+ InsertEncodedFrames(20, 1000);
+  // Frames spaced kMaxFrameDiffMs apart are not counted as run time.
+  InsertEncodedFrames(3, kMaxFrameDiffMs);
+ InsertEncodedFrames(10, 1000);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_name_ = "notlibvpx2";
+ InsertEncodedFrames(10, 500);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(15, 500);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
+}
+
+TEST_F(ForcedFallbackEnabled, NoFallbackIfAboveMaxPixels) {
+ encoded_image_._encodedWidth = kWidth + 1;
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, FallbackIfAtMaxPixels) {
+ encoded_image_._encodedWidth = kWidth;
+ codec_name_ = "libvpx";
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ statistics_proxy_.reset();
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/stats_counter.cc b/third_party/libwebrtc/video/stats_counter.cc
new file mode 100644
index 0000000000..dc548ea3c3
--- /dev/null
+++ b/third_party/libwebrtc/video/stats_counter.cc
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stats_counter.h"
+
+#include <algorithm>
+#include <limits>
+#include <map>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace {
+// Default periodic time interval for processing samples.
+const int64_t kDefaultProcessIntervalMs = 2000;
+const uint32_t kStreamId0 = 0;
+} // namespace
+
+std::string AggregatedStats::ToString() const {
+ return ToStringWithMultiplier(1);
+}
+
+std::string AggregatedStats::ToStringWithMultiplier(int multiplier) const {
+ rtc::StringBuilder ss;
+ ss << "periodic_samples:" << num_samples << ", {";
+ ss << "min:" << (min * multiplier) << ", ";
+ ss << "avg:" << (average * multiplier) << ", ";
+ ss << "max:" << (max * multiplier) << "}";
+ return ss.Release();
+}
+
+// Class holding periodically computed metrics.
+class AggregatedCounter {
+ public:
+ AggregatedCounter() : last_sample_(0), sum_samples_(0) {}
+ ~AggregatedCounter() {}
+
+ void Add(int sample) {
+ last_sample_ = sample;
+ sum_samples_ += sample;
+ ++stats_.num_samples;
+ if (stats_.num_samples == 1) {
+ stats_.min = sample;
+ stats_.max = sample;
+ }
+ stats_.min = std::min(sample, stats_.min);
+ stats_.max = std::max(sample, stats_.max);
+ }
+
+ AggregatedStats ComputeStats() {
+ Compute();
+ return stats_;
+ }
+
+ bool Empty() const { return stats_.num_samples == 0; }
+
+ int last_sample() const { return last_sample_; }
+
+ private:
+ void Compute() {
+ if (stats_.num_samples == 0)
+ return;
+
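+    // Integer average rounded to nearest: add half the divisor before
+    // dividing.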
+ stats_.average =
+ (sum_samples_ + stats_.num_samples / 2) / stats_.num_samples;
+ }
+ int last_sample_;
+ int64_t sum_samples_;
+ AggregatedStats stats_;
+};
+
+// Class holding gathered samples within a process interval.
+class Samples {
+ public:
+ Samples() : total_count_(0) {}
+ ~Samples() {}
+
+ void Add(int sample, uint32_t stream_id) {
+ samples_[stream_id].Add(sample);
+ ++total_count_;
+ }
+ void Set(int64_t sample, uint32_t stream_id) {
+ samples_[stream_id].Set(sample);
+ ++total_count_;
+ }
+ void SetLast(int64_t sample, uint32_t stream_id) {
+ samples_[stream_id].SetLast(sample);
+ }
+ int64_t GetLast(uint32_t stream_id) { return samples_[stream_id].GetLast(); }
+
+ int64_t Count() const { return total_count_; }
+ bool Empty() const { return total_count_ == 0; }
+
+ int64_t Sum() const {
+ int64_t sum = 0;
+ for (const auto& it : samples_)
+ sum += it.second.sum_;
+ return sum;
+ }
+
+ int Max() const {
+ int max = std::numeric_limits<int>::min();
+ for (const auto& it : samples_)
+ max = std::max(it.second.max_, max);
+ return max;
+ }
+
+ void Reset() {
+ for (auto& it : samples_)
+ it.second.Reset();
+ total_count_ = 0;
+ }
+
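+  // Sums, over streams with samples in this interval, the non-negative
+  // difference between the current sum and the sum at the previous Reset().
+  // Returns -1 if no stream contributes.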
+ int64_t Diff() const {
+ int64_t sum_diff = 0;
+ int count = 0;
+ for (const auto& it : samples_) {
+ if (it.second.count_ > 0) {
+ int64_t diff = it.second.sum_ - it.second.last_sum_;
+ if (diff >= 0) {
+ sum_diff += diff;
+ ++count;
+ }
+ }
+ }
+ return (count > 0) ? sum_diff : -1;
+ }
+
+ private:
+ struct Stats {
+ void Add(int sample) {
+ sum_ += sample;
+ ++count_;
+ max_ = std::max(sample, max_);
+ }
+ void Set(int64_t sample) {
+ sum_ = sample;
+ ++count_;
+ }
+ void SetLast(int64_t sample) { last_sum_ = sample; }
+ int64_t GetLast() const { return last_sum_; }
+ void Reset() {
+ if (count_ > 0)
+ last_sum_ = sum_;
+ sum_ = 0;
+ count_ = 0;
+ max_ = std::numeric_limits<int>::min();
+ }
+
+ int max_ = std::numeric_limits<int>::min();
+ int64_t count_ = 0;
+ int64_t sum_ = 0;
+ int64_t last_sum_ = 0;
+ };
+
+ int64_t total_count_;
+ std::map<uint32_t, Stats> samples_; // Gathered samples mapped by stream id.
+};
+
+// StatsCounter class.
+StatsCounter::StatsCounter(Clock* clock,
+ int64_t process_intervals_ms,
+ bool include_empty_intervals,
+ StatsCounterObserver* observer)
+ : include_empty_intervals_(include_empty_intervals),
+ process_intervals_ms_(process_intervals_ms),
+ aggregated_counter_(new AggregatedCounter()),
+ samples_(new Samples()),
+ clock_(clock),
+ observer_(observer),
+ last_process_time_ms_(-1),
+ paused_(false),
+ pause_time_ms_(-1),
+ min_pause_time_ms_(0) {
+ RTC_DCHECK_GT(process_intervals_ms_, 0);
+}
+
+StatsCounter::~StatsCounter() {}
+
+AggregatedStats StatsCounter::GetStats() {
+ return aggregated_counter_->ComputeStats();
+}
+
+AggregatedStats StatsCounter::ProcessAndGetStats() {
+ if (HasSample())
+ TryProcess();
+ return aggregated_counter_->ComputeStats();
+}
+
+void StatsCounter::ProcessAndPauseForDuration(int64_t min_pause_time_ms) {
+ ProcessAndPause();
+ min_pause_time_ms_ = min_pause_time_ms;
+}
+
+void StatsCounter::ProcessAndPause() {
+ if (HasSample())
+ TryProcess();
+ paused_ = true;
+ pause_time_ms_ = clock_->TimeInMilliseconds();
+}
+
+void StatsCounter::ProcessAndStopPause() {
+ if (HasSample())
+ TryProcess();
+ Resume();
+}
+
+bool StatsCounter::HasSample() const {
+ return last_process_time_ms_ != -1;
+}
+
+bool StatsCounter::TimeToProcess(int* elapsed_intervals) {
+ int64_t now = clock_->TimeInMilliseconds();
+ if (last_process_time_ms_ == -1)
+ last_process_time_ms_ = now;
+
+ int64_t diff_ms = now - last_process_time_ms_;
+ if (diff_ms < process_intervals_ms_)
+ return false;
+
+  // Advance `last_process_time_ms_` by the number of complete intervals that
+  // have passed; any remainder carries over to the next interval.
+ int64_t num_intervals = diff_ms / process_intervals_ms_;
+ last_process_time_ms_ += num_intervals * process_intervals_ms_;
+
+ *elapsed_intervals = num_intervals;
+ return true;
+}
+
+void StatsCounter::Add(int sample) {
+ TryProcess();
+ samples_->Add(sample, kStreamId0);
+ ResumeIfMinTimePassed();
+}
+
+void StatsCounter::Set(int64_t sample, uint32_t stream_id) {
+ if (paused_ && sample == samples_->GetLast(stream_id)) {
+    // Do not add the same sample while paused (it could end the pause).
+ return;
+ }
+ TryProcess();
+ samples_->Set(sample, stream_id);
+ ResumeIfMinTimePassed();
+}
+
+void StatsCounter::SetLast(int64_t sample, uint32_t stream_id) {
+ RTC_DCHECK(!HasSample()) << "Should be set before first sample is added.";
+ samples_->SetLast(sample, stream_id);
+}
+
+// Reports periodically computed metric.
+void StatsCounter::ReportMetricToAggregatedCounter(
+ int value,
+ int num_values_to_add) const {
+ for (int i = 0; i < num_values_to_add; ++i) {
+ aggregated_counter_->Add(value);
+ if (observer_)
+ observer_->OnMetricUpdated(value);
+ }
+}
+
+void StatsCounter::TryProcess() {
+ int elapsed_intervals;
+ if (!TimeToProcess(&elapsed_intervals))
+ return;
+
+ // Get and report periodically computed metric.
+ int metric;
+ if (GetMetric(&metric))
+ ReportMetricToAggregatedCounter(metric, 1);
+
+ // Report value for elapsed intervals without samples.
+ if (IncludeEmptyIntervals()) {
+    // If there are no samples, all elapsed intervals are empty; otherwise one
+    // interval contains sample(s) and is excluded from the empty count.
+ int empty_intervals =
+ samples_->Empty() ? elapsed_intervals : (elapsed_intervals - 1);
+ ReportMetricToAggregatedCounter(GetValueForEmptyInterval(),
+ empty_intervals);
+ }
+
+ // Reset samples for elapsed interval.
+ samples_->Reset();
+}
+
+bool StatsCounter::IncludeEmptyIntervals() const {
+ return include_empty_intervals_ && !paused_ && !aggregated_counter_->Empty();
+}
+
+void StatsCounter::ResumeIfMinTimePassed() {
+ if (paused_ &&
+ (clock_->TimeInMilliseconds() - pause_time_ms_) >= min_pause_time_ms_) {
+ Resume();
+ }
+}
+
+void StatsCounter::Resume() {
+ paused_ = false;
+ min_pause_time_ms_ = 0;
+}
+
+// StatsCounter sub-classes.
+AvgCounter::AvgCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ include_empty_intervals,
+ observer) {}
+
+void AvgCounter::Add(int sample) {
+ StatsCounter::Add(sample);
+}
+
+bool AvgCounter::GetMetric(int* metric) const {
+ int64_t count = samples_->Count();
+ if (count == 0)
+ return false;
+
+ *metric = (samples_->Sum() + count / 2) / count;
+ return true;
+}
+
+int AvgCounter::GetValueForEmptyInterval() const {
+ return aggregated_counter_->last_sample();
+}
+
+MaxCounter::MaxCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ int64_t process_intervals_ms)
+ : StatsCounter(clock,
+ process_intervals_ms,
+ false, // `include_empty_intervals`
+ observer) {}
+
+void MaxCounter::Add(int sample) {
+ StatsCounter::Add(sample);
+}
+
+bool MaxCounter::GetMetric(int* metric) const {
+ if (samples_->Empty())
+ return false;
+
+ *metric = samples_->Max();
+ return true;
+}
+
+int MaxCounter::GetValueForEmptyInterval() const {
+ RTC_DCHECK_NOTREACHED();
+ return 0;
+}
+
+PercentCounter::PercentCounter(Clock* clock, StatsCounterObserver* observer)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ false, // `include_empty_intervals`
+ observer) {}
+
+void PercentCounter::Add(bool sample) {
+ StatsCounter::Add(sample ? 1 : 0);
+}
+
+bool PercentCounter::GetMetric(int* metric) const {
+ int64_t count = samples_->Count();
+ if (count == 0)
+ return false;
+
+ *metric = (samples_->Sum() * 100 + count / 2) / count;
+ return true;
+}
+
+int PercentCounter::GetValueForEmptyInterval() const {
+ RTC_DCHECK_NOTREACHED();
+ return 0;
+}
+
+PermilleCounter::PermilleCounter(Clock* clock, StatsCounterObserver* observer)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ false, // `include_empty_intervals`
+ observer) {}
+
+void PermilleCounter::Add(bool sample) {
+ StatsCounter::Add(sample ? 1 : 0);
+}
+
+bool PermilleCounter::GetMetric(int* metric) const {
+ int64_t count = samples_->Count();
+ if (count == 0)
+ return false;
+
+ *metric = (samples_->Sum() * 1000 + count / 2) / count;
+ return true;
+}
+
+int PermilleCounter::GetValueForEmptyInterval() const {
+ RTC_DCHECK_NOTREACHED();
+ return 0;
+}
+
+RateCounter::RateCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ include_empty_intervals,
+ observer) {}
+
+void RateCounter::Add(int sample) {
+ StatsCounter::Add(sample);
+}
+
+bool RateCounter::GetMetric(int* metric) const {
+ if (samples_->Empty())
+ return false;
+
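+  // Scale the interval sum to units per second, rounded to nearest.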
+ *metric = (samples_->Sum() * 1000 + process_intervals_ms_ / 2) /
+ process_intervals_ms_;
+ return true;
+}
+
+int RateCounter::GetValueForEmptyInterval() const {
+ return 0;
+}
+
+RateAccCounter::RateAccCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ include_empty_intervals,
+ observer) {}
+
+void RateAccCounter::Set(int64_t sample, uint32_t stream_id) {
+ StatsCounter::Set(sample, stream_id);
+}
+
+void RateAccCounter::SetLast(int64_t sample, uint32_t stream_id) {
+ StatsCounter::SetLast(sample, stream_id);
+}
+
+bool RateAccCounter::GetMetric(int* metric) const {
+ int64_t diff = samples_->Diff();
+ if (diff < 0 || (!include_empty_intervals_ && diff == 0))
+ return false;
+
+ *metric = (diff * 1000 + process_intervals_ms_ / 2) / process_intervals_ms_;
+ return true;
+}
+
+int RateAccCounter::GetValueForEmptyInterval() const {
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/stats_counter.h b/third_party/libwebrtc/video/stats_counter.h
new file mode 100644
index 0000000000..9e2b8702d6
--- /dev/null
+++ b/third_party/libwebrtc/video/stats_counter.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_STATS_COUNTER_H_
+#define VIDEO_STATS_COUNTER_H_
+
+#include <memory>
+#include <string>
+
+namespace webrtc {
+
+class AggregatedCounter;
+class Clock;
+class Samples;
+
+// `StatsCounterObserver` is called periodically when a metric is updated.
+class StatsCounterObserver {
+ public:
+ virtual void OnMetricUpdated(int sample) = 0;
+
+ virtual ~StatsCounterObserver() {}
+};
+
+struct AggregatedStats {
+ std::string ToString() const;
+ std::string ToStringWithMultiplier(int multiplier) const;
+
+ int64_t num_samples = 0;
+ int min = -1;
+ int max = -1;
+ int average = -1;
+ // TODO(asapersson): Consider adding median/percentiles.
+};
+
+// Classes that periodically compute a metric.
+//
+// During a period (`process_intervals_ms_`), different metrics can be
+// computed, e.g.:
+// - `AvgCounter`: average of samples
+// - `PercentCounter`: percentage of samples
+// - `PermilleCounter`: permille of samples
+//
+// Each periodic metric can be either:
+// - reported to an `observer` each period
+// - aggregated during the call (e.g. min, max, average)
+//
+// periodically computed
+// GetMetric() GetMetric() => AggregatedStats
+// ^ ^ (e.g. min/max/avg)
+// | |
+// | * * * * | ** * * * * | ...
+// |<- process interval ->|
+//
+// (*) - samples
+//
+//
+// Example usage:
+//
+//   AvgCounter counter(&clock, nullptr, false);
+// counter.Add(5);
+// counter.Add(1);
+// counter.Add(6); // process interval passed -> GetMetric() avg:4
+// counter.Add(7);
+// counter.Add(3); // process interval passed -> GetMetric() avg:5
+// counter.Add(10);
+// counter.Add(20); // process interval passed -> GetMetric() avg:15
+// AggregatedStats stats = counter.GetStats();
+// stats: {min:4, max:15, avg:8}
+//
+
+// Note: StatsCounter takes ownership of `observer`.
+
+class StatsCounter {
+ public:
+ virtual ~StatsCounter();
+
+  // Gets the metric for the current interval. Returns true on success, false
+  // otherwise.
+ virtual bool GetMetric(int* metric) const = 0;
+
+ // Gets the value to use for an interval without samples.
+ virtual int GetValueForEmptyInterval() const = 0;
+
+ // Gets aggregated stats (i.e. aggregate of periodically computed metrics).
+ AggregatedStats GetStats();
+
+  // Reports metrics for elapsed intervals to AggregatedCounter and returns the
+  // aggregated stats.
+ AggregatedStats ProcessAndGetStats();
+
+  // Reports metrics for elapsed intervals to AggregatedCounter and pauses
+  // stats (i.e. empty intervals are discarded until the next sample is added).
+ void ProcessAndPause();
+
+  // As above, but with a minimum pause time. Samples added within this
+  // interval will not resume the stats (i.e. will not stop the pause).
+ void ProcessAndPauseForDuration(int64_t min_pause_time_ms);
+
+ // Reports metrics for elapsed intervals to AggregatedCounter and stops pause.
+ void ProcessAndStopPause();
+
+ // Checks if a sample has been added (i.e. Add or Set called).
+ bool HasSample() const;
+
+ protected:
+ StatsCounter(Clock* clock,
+ int64_t process_intervals_ms,
+ bool include_empty_intervals,
+ StatsCounterObserver* observer);
+
+ void Add(int sample);
+ void Set(int64_t sample, uint32_t stream_id);
+ void SetLast(int64_t sample, uint32_t stream_id);
+
+ const bool include_empty_intervals_;
+ const int64_t process_intervals_ms_;
+ const std::unique_ptr<AggregatedCounter> aggregated_counter_;
+ const std::unique_ptr<Samples> samples_;
+
+ private:
+ bool TimeToProcess(int* num_elapsed_intervals);
+ void TryProcess();
+ void ReportMetricToAggregatedCounter(int value, int num_values_to_add) const;
+ bool IncludeEmptyIntervals() const;
+ void Resume();
+ void ResumeIfMinTimePassed();
+
+ Clock* const clock_;
+ const std::unique_ptr<StatsCounterObserver> observer_;
+ int64_t last_process_time_ms_;
+ bool paused_;
+ int64_t pause_time_ms_;
+ int64_t min_pause_time_ms_;
+};
+
+// AvgCounter: average of samples
+//
+// | * * * | * * | ...
+// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
+// GetMetric | (5 + 1 + 6) / 3 | (5 + 5) / 2 |
+//
+// `include_empty_intervals`: If set, intervals without samples will be included
+// in the stats. The value for an interval is
+// determined by GetValueForEmptyInterval().
+//
+class AvgCounter : public StatsCounter {
+ public:
+ AvgCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals);
+ ~AvgCounter() override {}
+
+ AvgCounter(const AvgCounter&) = delete;
+ AvgCounter& operator=(const AvgCounter&) = delete;
+
+ void Add(int sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+
+ // Returns the last computed metric (i.e. from GetMetric).
+ int GetValueForEmptyInterval() const override;
+};
+
+// MaxCounter: maximum of samples
+//
+// | * * * | * * | ...
+// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
+// GetMetric | max: (5, 1, 6) | max: (5, 5) |
+//
+class MaxCounter : public StatsCounter {
+ public:
+ MaxCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ int64_t process_intervals_ms);
+ ~MaxCounter() override {}
+
+ MaxCounter(const MaxCounter&) = delete;
+ MaxCounter& operator=(const MaxCounter&) = delete;
+
+ void Add(int sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override;
+};
+
+// PercentCounter: percentage of samples
+//
+// | * * * | * * | ...
+// | Add(T) Add(F) Add(T) | Add(F) Add(T) |
+// GetMetric | 100 * 2 / 3 | 100 * 1 / 2 |
+//
+class PercentCounter : public StatsCounter {
+ public:
+ PercentCounter(Clock* clock, StatsCounterObserver* observer);
+ ~PercentCounter() override {}
+
+ PercentCounter(const PercentCounter&) = delete;
+ PercentCounter& operator=(const PercentCounter&) = delete;
+
+ void Add(bool sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override;
+};
+
+// PermilleCounter: permille of samples
+//
+// | * * * | * * | ...
+// | Add(T) Add(F) Add(T) | Add(F) Add(T) |
+// GetMetric | 1000 * 2 / 3 | 1000 * 1 / 2 |
+//
+class PermilleCounter : public StatsCounter {
+ public:
+ PermilleCounter(Clock* clock, StatsCounterObserver* observer);
+ ~PermilleCounter() override {}
+
+ PermilleCounter(const PermilleCounter&) = delete;
+ PermilleCounter& operator=(const PermilleCounter&) = delete;
+
+ void Add(bool sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override;
+};
+
+// RateCounter: units per second
+//
+// | * * * | * * | ...
+// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
+// |<------ 2 sec ------->| |
+// GetMetric | (5 + 1 + 6) / 2 | (5 + 5) / 2 |
+//
+// `include_empty_intervals`: If set, intervals without samples will be included
+// in the stats. The value for an interval is
+// determined by GetValueForEmptyInterval().
+//
+class RateCounter : public StatsCounter {
+ public:
+ RateCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals);
+ ~RateCounter() override {}
+
+ RateCounter(const RateCounter&) = delete;
+ RateCounter& operator=(const RateCounter&) = delete;
+
+ void Add(int sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override; // Returns zero.
+};
+
+// RateAccCounter: units per second (for cumulative counter values)
+//
+// | * * * | * * | ...
+// | Set(5) Set(6) Set(8) | Set(11) Set(13) |
+// |<------ 2 sec ------->| |
+// GetMetric | (8 - 0) / 2 | (13 - 8) / 2 |
+//
+// `include_empty_intervals`: If set, intervals without samples will be included
+// in the stats. The value for an interval is
+// determined by GetValueForEmptyInterval().
+//
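+// A minimal usage sketch (illustrative only; `clock`, `ssrc`, and the
+// cumulative `bytes_sent` value are assumptions, not part of this API):
+//
+//   RateAccCounter counter(&clock, nullptr, /*include_empty_intervals=*/true);
+//   counter.Set(bytes_sent, ssrc);  // Cumulative value, per stream.
+//   ...
+//   AggregatedStats stats = counter.ProcessAndGetStats();
+//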
+class RateAccCounter : public StatsCounter {
+ public:
+ RateAccCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals);
+ ~RateAccCounter() override {}
+
+ RateAccCounter(const RateAccCounter&) = delete;
+ RateAccCounter& operator=(const RateAccCounter&) = delete;
+
+ void Set(int64_t sample, uint32_t stream_id);
+
+  // Sets the value for the previous interval.
+  // To be used if an initial value other than zero is required.
+ void SetLast(int64_t sample, uint32_t stream_id);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override; // Returns zero.
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_STATS_COUNTER_H_
diff --git a/third_party/libwebrtc/video/stats_counter_unittest.cc b/third_party/libwebrtc/video/stats_counter_unittest.cc
new file mode 100644
index 0000000000..32f8c8e5e1
--- /dev/null
+++ b/third_party/libwebrtc/video/stats_counter_unittest.cc
@@ -0,0 +1,602 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stats_counter.h"
+
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const int kDefaultProcessIntervalMs = 2000;
+const uint32_t kStreamId = 123456;
+
+class StatsCounterObserverImpl : public StatsCounterObserver {
+ public:
+ StatsCounterObserverImpl() : num_calls_(0), last_sample_(-1) {}
+ void OnMetricUpdated(int sample) override {
+ ++num_calls_;
+ last_sample_ = sample;
+ }
+ int num_calls_;
+ int last_sample_;
+};
+} // namespace
+
+class StatsCounterTest : public ::testing::Test {
+ protected:
+ StatsCounterTest() : clock_(1234) {}
+
+ void AddSampleAndAdvance(int sample, int interval_ms, AvgCounter* counter) {
+ counter->Add(sample);
+ clock_.AdvanceTimeMilliseconds(interval_ms);
+ }
+
+ void SetSampleAndAdvance(int sample,
+ int interval_ms,
+ RateAccCounter* counter) {
+ counter->Set(sample, kStreamId);
+ clock_.AdvanceTimeMilliseconds(interval_ms);
+ }
+
+ void VerifyStatsIsNotSet(const AggregatedStats& stats) {
+ EXPECT_EQ(0, stats.num_samples);
+ EXPECT_EQ(-1, stats.min);
+ EXPECT_EQ(-1, stats.max);
+ EXPECT_EQ(-1, stats.average);
+ }
+
+ SimulatedClock clock_;
+};
+
+TEST_F(StatsCounterTest, NoSamples) {
+ AvgCounter counter(&clock_, nullptr, false);
+ VerifyStatsIsNotSet(counter.GetStats());
+}
+
+TEST_F(StatsCounterTest, TestRegisterObserver) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample = 22;
+ AvgCounter counter(&clock_, observer, false);
+ AddSampleAndAdvance(kSample, kDefaultProcessIntervalMs, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ EXPECT_EQ(1, observer->num_calls_);
+}
+
+TEST_F(StatsCounterTest, HasSample) {
+ AvgCounter counter(&clock_, nullptr, false);
+ EXPECT_FALSE(counter.HasSample());
+ counter.Add(1);
+ EXPECT_TRUE(counter.HasSample());
+}
+
+TEST_F(StatsCounterTest, VerifyProcessInterval) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, false);
+ counter.Add(4);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs - 1);
+  // Try to trigger process (interval has not passed).
+ counter.Add(8);
+ EXPECT_EQ(0, observer->num_calls_);
+ VerifyStatsIsNotSet(counter.GetStats());
+ // Make process interval pass.
+ clock_.AdvanceTimeMilliseconds(1);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ EXPECT_EQ(1, observer->num_calls_);
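+  // Rounded average of the first interval's samples: (4 + 8) / 2 = 6.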
+ EXPECT_EQ(6, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+}
+
+TEST_F(StatsCounterTest, TestMetric_AvgCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, false);
+ counter.Add(4);
+ counter.Add(8);
+ counter.Add(9);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ // Average per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(7, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(7, stats.min);
+ EXPECT_EQ(7, stats.max);
+ EXPECT_EQ(7, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestMetric_MaxCounter) {
+ const int64_t kProcessIntervalMs = 1000;
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ MaxCounter counter(&clock_, observer, kProcessIntervalMs);
+ counter.Add(4);
+ counter.Add(9);
+ counter.Add(8);
+ clock_.AdvanceTimeMilliseconds(kProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+  // Max per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(9, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(9, stats.min);
+ EXPECT_EQ(9, stats.max);
+ EXPECT_EQ(9, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestMetric_PercentCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ PercentCounter counter(&clock_, observer);
+ counter.Add(true);
+ counter.Add(false);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(false);
+ // Percentage per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(50, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(50, stats.min);
+ EXPECT_EQ(50, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_PermilleCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ PermilleCounter counter(&clock_, observer);
+ counter.Add(true);
+ counter.Add(false);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(false);
+ // Permille per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(500, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(500, stats.min);
+ EXPECT_EQ(500, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateCounter counter(&clock_, observer, true);
+ counter.Add(186);
+ counter.Add(350);
+ counter.Add(22);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+  // Rate per interval: (186 + 350 + 22) / 2 sec = 279 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(279, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(279, stats.min);
+ EXPECT_EQ(279, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateAccCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ counter.Set(175, kStreamId);
+ counter.Set(188, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(192, kStreamId);
+ // Rate per interval: (188 - 0) / 2 sec = 94 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(94, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(94, stats.min);
+ EXPECT_EQ(94, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateAccCounterWithSetLast) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ counter.SetLast(98, kStreamId);
+ counter.Set(175, kStreamId);
+ counter.Set(188, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(192, kStreamId);
+ // Rate per interval: (188 - 98) / 2 sec = 45 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(45, observer->last_sample_);
+}
+
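+// RateAccCounter appears to track one cumulative value per stream id; each
+// processed interval it reports the summed per-stream deltas divided by the
+// interval length, so a stream whose value does not advance contributes zero.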
+TEST_F(StatsCounterTest, TestMetric_RateAccCounterWithMultipleStreamIds) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ counter.Set(175, kStreamId);
+ counter.Set(188, kStreamId);
+ counter.Set(100, kStreamId + 1);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(150, kStreamId + 1);
+ // Rate per interval: ((188 - 0) + (100 - 0)) / 2 sec = 144 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(144, observer->last_sample_);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(198, kStreamId);
+ // Rate per interval: (0 + (150 - 100)) / 2 sec = 25 samples/sec
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(25, observer->last_sample_);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(200, kStreamId);
+ // Rate per interval: ((198 - 188) + (0)) / 2 sec = 5 samples/sec
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(5, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(3, stats.num_samples);
+ EXPECT_EQ(5, stats.min);
+ EXPECT_EQ(144, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestGetStats_MultipleIntervals) {
+ AvgCounter counter(&clock_, nullptr, false);
+ const int kSample1 = 1;
+ const int kSample2 = 5;
+ const int kSample3 = 8;
+ const int kSample4 = 11;
+ const int kSample5 = 50;
+ AddSampleAndAdvance(kSample1, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample2, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample3, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample4, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample5, kDefaultProcessIntervalMs, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(5, stats.num_samples);
+ EXPECT_EQ(kSample1, stats.min);
+ EXPECT_EQ(kSample5, stats.max);
+ EXPECT_EQ(15, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestGetStatsTwice) {
+ const int kSample1 = 4;
+ const int kSample2 = 7;
+ AvgCounter counter(&clock_, nullptr, false);
+ AddSampleAndAdvance(kSample1, kDefaultProcessIntervalMs, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(kSample2);
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(kSample1, stats.min);
+ EXPECT_EQ(kSample1, stats.max);
+ // Trigger process (sample included in next interval).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.Add(111);
+ stats = counter.GetStats();
+ EXPECT_EQ(2, stats.num_samples);
+ EXPECT_EQ(kSample1, stats.min);
+ EXPECT_EQ(kSample2, stats.max);
+ EXPECT_EQ(6, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_NegativeRateIgnored) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample1 = 200; // 200 / 2 sec
+ const int kSample2 = 100; // -100 / 2 sec - negative ignored
+ const int kSample3 = 700; // 600 / 2 sec
+ RateAccCounter counter(&clock_, observer, true);
+ SetSampleAndAdvance(kSample1, kDefaultProcessIntervalMs, &counter);
+ SetSampleAndAdvance(kSample2, kDefaultProcessIntervalMs, &counter);
+ SetSampleAndAdvance(kSample3, kDefaultProcessIntervalMs, &counter);
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(100, observer->last_sample_);
+ // Trigger process (sample included in next interval).
+ counter.Set(2000, kStreamId);
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(300, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(2, stats.num_samples);
+ EXPECT_EQ(100, stats.min);
+ EXPECT_EQ(300, stats.max);
+ EXPECT_EQ(200, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_IntervalsWithoutSamplesIncluded) {
+ // Samples: | 6 | x | x | 8 | // x: empty interval
+ // Stats: | 6 | 6 | 6 | 8 | // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs * 4 - 1, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(8);
+ // [6:3], 3 intervals passed (2 without samples -> last value reported).
+ AggregatedStats stats = counter.ProcessAndGetStats();
+ EXPECT_EQ(3, stats.num_samples);
+ EXPECT_EQ(6, stats.min);
+ EXPECT_EQ(6, stats.max);
+ // Make next interval pass and verify stats: [6:3],[8:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(4, observer->num_calls_);
+ EXPECT_EQ(8, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_WithPause) {
+ // Samples: | 6 | x | x | x | - | 22 | x | // x: empty interval, -: paused
+ // Stats: | 6 | 6 | 6 | 6 | - | 22 | 22 | // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+ // Add sample and advance 3 intervals (2 w/o samples -> last value reported).
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs * 4 - 1, &counter);
+ // Trigger process and verify stats: [6:3]
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass (1 without samples).
+ // Process and pause. Verify stats: [6:4].
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndPause();
+ EXPECT_EQ(4, observer->num_calls_); // Last value reported.
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass (1 without samples -> ignored while paused).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 2 - 1);
+ counter.Add(22); // Stops pause.
+ EXPECT_EQ(4, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass, [6:4][22:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(5, observer->num_calls_);
+ EXPECT_EQ(22, observer->last_sample_);
+ // Make 1 interval pass (1 w/o samples -> pause stopped, last value reported).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(6, observer->num_calls_);
+ EXPECT_EQ(22, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_AddSampleStopsPause) {
+  // Samples: | 12 | 24 |
+  // Stats:   | 6  | 6  |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+  // Add sample and advance one interval.
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndPause();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add sample and advance one interval.
+ counter.Set(24, kStreamId); // Pause stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_AddSameSampleDoesNotStopPause) {
+ // Samples: | 12 | 12 | 24 | // -: paused
+ // Stats: | 6 | - | 6 |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+  // Add sample and advance one interval.
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndPause();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add the same sample and advance one interval.
+ counter.Set(12, kStreamId); // Pause not stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add a new sample and advance one interval.
+ counter.Set(24, kStreamId); // Pause stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_PauseAndStopPause) {
+ // Samples: | 12 | 12 | 12 | // -: paused
+ // Stats: | 6 | - | 0 |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+  // Add sample and advance one interval.
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndPause();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add the same sample and advance one interval.
+ counter.Set(12, kStreamId); // Pause not stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Stop the pause, add a sample, and advance one interval.
+ counter.ProcessAndStopPause();
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_WithoutMinPauseTimePassed) {
+  // Samples: | 6 | 2 | - |  // -: paused
+  // Stats:   | 6 | 2 | - |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+  // Add sample and advance one interval.
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs, &counter);
+ // Process and pause. Verify stats: [6:1].
+ const int64_t kMinMs = 500;
+ counter.ProcessAndPauseForDuration(kMinMs);
+ EXPECT_EQ(1, observer->num_calls_); // Last value reported.
+ EXPECT_EQ(6, observer->last_sample_);
+  // Min pause time has not passed.
+ clock_.AdvanceTimeMilliseconds(kMinMs - 1);
+ counter.Add(2); // Pause not stopped.
+ // Make two intervals pass (1 without samples -> ignored while paused).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 2 - (kMinMs - 1));
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(2, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_WithMinPauseTimePassed) {
+  // Samples: | 6 | 2 | x |  // x: empty interval
+  // Stats:   | 6 | 2 | 2 |  // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+  // Add sample and advance one interval.
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs, &counter);
+ // Process and pause. Verify stats: [6:1].
+ const int64_t kMinMs = 500;
+ counter.ProcessAndPauseForDuration(kMinMs);
+ EXPECT_EQ(1, observer->num_calls_); // Last value reported.
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make min pause time pass.
+ clock_.AdvanceTimeMilliseconds(kMinMs);
+ counter.Add(2); // Stop pause.
+ // Make two intervals pass (1 without samples -> last value reported).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 2 - kMinMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(2, observer->last_sample_);
+}
+
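+// The `include_empty_intervals` constructor flag appears to control how
+// intervals without samples are handled: when true, rate counters report them
+// as zero and AvgCounter repeats the last value; when false, they are skipped.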
+TEST_F(StatsCounterTest, TestRateCounter_IntervalsWithoutSamplesIgnored) {
+ // Samples: | 50 | x | 20 | // x: empty interval
+ // Stats: | 25 | x | 10 | // x -> ignored
+ const bool kIncludeEmptyIntervals = false;
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample1 = 50; // 50 / 2 sec
+ const int kSample2 = 20; // 20 / 2 sec
+ RateCounter counter(&clock_, observer, kIncludeEmptyIntervals);
+ counter.Add(kSample1);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 3 - 1);
+ // Trigger process (sample included in next interval).
+ counter.Add(kSample2);
+ // [25:1], 2 intervals passed (1 without samples -> ignored).
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(25, observer->last_sample_);
+ // Make next interval pass and verify stats: [10:1],[25:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(10, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateCounter_IntervalsWithoutSamplesIncluded) {
+ // Samples: | 50 | x | 20 | // x: empty interval
+ // Stats: | 25 | 0 | 10 | // x -> zero reported
+ const bool kIncludeEmptyIntervals = true;
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample1 = 50; // 50 / 2 sec
+ const int kSample2 = 20; // 20 / 2 sec
+ RateCounter counter(&clock_, observer, kIncludeEmptyIntervals);
+ counter.Add(kSample1);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 3 - 1);
+ // Trigger process (sample included in next interval).
+ counter.Add(kSample2);
+ // [0:1],[25:1], 2 intervals passed (1 without samples -> zero reported).
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+ // Make last interval pass and verify stats: [0:1],[10:1],[25:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ AggregatedStats stats = counter.ProcessAndGetStats();
+ EXPECT_EQ(25, stats.max);
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(10, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_IntervalsWithoutSamplesIncluded) {
+ // Samples: | 12 | x | x | x | 60 | // x: empty interval
+ // Stats: | 6 | 0 | 0 | 0 | 24 | // x -> zero reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ VerifyStatsIsNotSet(counter.ProcessAndGetStats());
+ // Advance one interval and verify stats.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ VerifyStatsIsNotSet(counter.ProcessAndGetStats());
+ // Add sample and advance 3 intervals (2 w/o samples -> zero reported).
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 4 - 1);
+ // Trigger process and verify stats: [0:2][6:1]
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+ // Make next interval pass (1 w/o samples -> zero reported), [0:3][6:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(4, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+ // Insert sample and advance non-complete interval, no change, [0:3][6:1]
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs - 1);
+ counter.Set(60, kStreamId);
+ EXPECT_EQ(4, observer->num_calls_);
+ // Make next interval pass, [0:3][6:1][24:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ AggregatedStats stats = counter.ProcessAndGetStats();
+ EXPECT_EQ(5, observer->num_calls_);
+ EXPECT_EQ(24, observer->last_sample_);
+ EXPECT_EQ(6, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_IntervalsWithoutSamplesIgnored) {
+ // Samples: | 12 | x | x | x | 60 | // x: empty interval
+ // Stats: | 6 | x | x | x | 24 | // x -> ignored
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, false);
+ // Add sample and advance 3 intervals (2 w/o samples -> ignored).
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 4 - 1);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass (1 w/o samples -> ignored), [6:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ // Insert sample and advance non-complete interval, no change, [6:1]
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs - 1);
+ counter.Set(60, kStreamId);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ // Make next interval pass, [6:1][24:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(24, observer->last_sample_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/stream_synchronization.cc b/third_party/libwebrtc/video/stream_synchronization.cc
new file mode 100644
index 0000000000..d86cc79203
--- /dev/null
+++ b/third_party/libwebrtc/video/stream_synchronization.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stream_synchronization.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
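+// Largest adjustment (in milliseconds) applied in one ComputeDelays() call.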
+static const int kMaxChangeMs = 80;
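+// Upper bound on the measured relative delay and on the extra target delay.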
+static const int kMaxDeltaDelayMs = 10000;
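+// Length of the moving-average filter applied to the measured delay diff.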
+static const int kFilterLength = 4;
+// Minimum difference between audio and video to warrant a change.
+static const int kMinDeltaMs = 30;
+
+StreamSynchronization::StreamSynchronization(uint32_t video_stream_id,
+ uint32_t audio_stream_id)
+ : video_stream_id_(video_stream_id),
+ audio_stream_id_(audio_stream_id),
+ base_target_delay_ms_(0),
+ avg_diff_ms_(0) {}
+
+bool StreamSynchronization::ComputeRelativeDelay(
+ const Measurements& audio_measurement,
+ const Measurements& video_measurement,
+ int* relative_delay_ms) {
+ NtpTime audio_last_capture_time =
+ audio_measurement.rtp_to_ntp.Estimate(audio_measurement.latest_timestamp);
+ if (!audio_last_capture_time.Valid()) {
+ return false;
+ }
+ NtpTime video_last_capture_time =
+ video_measurement.rtp_to_ntp.Estimate(video_measurement.latest_timestamp);
+ if (!video_last_capture_time.Valid()) {
+ return false;
+ }
+ int64_t audio_last_capture_time_ms = audio_last_capture_time.ToMs();
+ int64_t video_last_capture_time_ms = video_last_capture_time.ToMs();
+
+ // Positive diff means that video_measurement is behind audio_measurement.
+ *relative_delay_ms =
+ video_measurement.latest_receive_time_ms -
+ audio_measurement.latest_receive_time_ms -
+ (video_last_capture_time_ms - audio_last_capture_time_ms);
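+  // Illustrative numbers: if video was captured 10 ms after audio
+  // (capture-time delta 10 ms) but received 40 ms later than audio
+  // (receive-time delta 40 ms), this yields 40 - 10 = 30, i.e. video runs
+  // 30 ms behind audio.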
+
+ if (*relative_delay_ms > kMaxDeltaDelayMs ||
+ *relative_delay_ms < -kMaxDeltaDelayMs) {
+ return false;
+ }
+ return true;
+}
+
+bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
+ int current_audio_delay_ms,
+ int* total_audio_delay_target_ms,
+ int* total_video_delay_target_ms) {
+ int current_video_delay_ms = *total_video_delay_target_ms;
+
+ RTC_LOG(LS_VERBOSE) << "Audio delay: " << current_audio_delay_ms
+ << " current diff: " << relative_delay_ms
+ << " for stream " << audio_stream_id_;
+
+ // Calculate the difference between the lowest possible video delay and the
+ // current audio delay.
+ int current_diff_ms =
+ current_video_delay_ms - current_audio_delay_ms + relative_delay_ms;
+
+ avg_diff_ms_ =
+ ((kFilterLength - 1) * avg_diff_ms_ + current_diff_ms) / kFilterLength;
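+  // E.g. with kFilterLength = 4, a previous average of 0 and a single
+  // current_diff_ms of 40: avg_diff_ms_ = (3 * 0 + 40) / 4 = 10, which is
+  // below kMinDeltaMs, so no adjustment is made yet.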
+ if (abs(avg_diff_ms_) < kMinDeltaMs) {
+ // Don't adjust if the diff is within our margin.
+ return false;
+ }
+
+ // Make sure we don't move too fast.
+ int diff_ms = avg_diff_ms_ / 2;
+ diff_ms = std::min(diff_ms, kMaxChangeMs);
+ diff_ms = std::max(diff_ms, -kMaxChangeMs);
+
+ // Reset the average after a move to prevent overshooting reaction.
+ avg_diff_ms_ = 0;
+
+ if (diff_ms > 0) {
+ // The minimum video delay is longer than the current audio delay.
+ // We need to decrease extra video delay, or add extra audio delay.
+ if (video_delay_.extra_ms > base_target_delay_ms_) {
+ // We have extra delay added to ViE. Reduce this delay before adding
+ // extra delay to VoE.
+ video_delay_.extra_ms -= diff_ms;
+ audio_delay_.extra_ms = base_target_delay_ms_;
+    } else {  // video_delay_.extra_ms <= base_target_delay_ms_
+ // We have no extra video delay to remove, increase the audio delay.
+ audio_delay_.extra_ms += diff_ms;
+ video_delay_.extra_ms = base_target_delay_ms_;
+ }
+  } else {  // diff_ms < 0
+ // The video delay is lower than the current audio delay.
+ // We need to decrease extra audio delay, or add extra video delay.
+ if (audio_delay_.extra_ms > base_target_delay_ms_) {
+ // We have extra delay in VoiceEngine.
+ // Start with decreasing the voice delay.
+ // Note: diff_ms is negative; add the negative difference.
+ audio_delay_.extra_ms += diff_ms;
+ video_delay_.extra_ms = base_target_delay_ms_;
+    } else {  // audio_delay_.extra_ms <= base_target_delay_ms_
+ // We have no extra delay in VoiceEngine, increase the video delay.
+ // Note: diff_ms is negative; subtract the negative difference.
+ video_delay_.extra_ms -= diff_ms; // X - (-Y) = X + Y.
+ audio_delay_.extra_ms = base_target_delay_ms_;
+ }
+ }
+
+ // Make sure that video is never below our target.
+ video_delay_.extra_ms =
+ std::max(video_delay_.extra_ms, base_target_delay_ms_);
+
+ int new_video_delay_ms;
+ if (video_delay_.extra_ms > base_target_delay_ms_) {
+ new_video_delay_ms = video_delay_.extra_ms;
+ } else {
+    // No change to the extra video delay. We are changing the audio delay,
+    // and only one delay may be changed at a time.
+ new_video_delay_ms = video_delay_.last_ms;
+ }
+
+ // Make sure that we don't go below the extra video delay.
+ new_video_delay_ms = std::max(new_video_delay_ms, video_delay_.extra_ms);
+
+ // Verify we don't go above the maximum allowed video delay.
+ new_video_delay_ms =
+ std::min(new_video_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);
+
+ int new_audio_delay_ms;
+ if (audio_delay_.extra_ms > base_target_delay_ms_) {
+ new_audio_delay_ms = audio_delay_.extra_ms;
+ } else {
+    // No change to the audio delay. We are changing the video delay, and
+    // only one delay may be changed at a time.
+ new_audio_delay_ms = audio_delay_.last_ms;
+ }
+
+ // Make sure that we don't go below the extra audio delay.
+ new_audio_delay_ms = std::max(new_audio_delay_ms, audio_delay_.extra_ms);
+
+ // Verify we don't go above the maximum allowed audio delay.
+ new_audio_delay_ms =
+ std::min(new_audio_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);
+
+ video_delay_.last_ms = new_video_delay_ms;
+ audio_delay_.last_ms = new_audio_delay_ms;
+
+ RTC_LOG(LS_VERBOSE) << "Sync video delay " << new_video_delay_ms
+ << " for video stream " << video_stream_id_
+ << " and audio delay " << audio_delay_.extra_ms
+ << " for audio stream " << audio_stream_id_;
+
+ *total_video_delay_target_ms = new_video_delay_ms;
+ *total_audio_delay_target_ms = new_audio_delay_ms;
+ return true;
+}
+
+void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) {
+ // Initial extra delay for audio (accounting for existing extra delay).
+ audio_delay_.extra_ms += target_delay_ms - base_target_delay_ms_;
+ audio_delay_.last_ms += target_delay_ms - base_target_delay_ms_;
+
+ // The video delay is compared to the last value (and how much we can update
+ // is limited by that as well).
+ video_delay_.last_ms += target_delay_ms - base_target_delay_ms_;
+ video_delay_.extra_ms += target_delay_ms - base_target_delay_ms_;
+
+ // Video is already delayed by the desired amount.
+ base_target_delay_ms_ = target_delay_ms;
+}
+
+void StreamSynchronization::ReduceAudioDelay() {
+ audio_delay_.extra_ms *= 0.9f;
+}
+
+void StreamSynchronization::ReduceVideoDelay() {
+ video_delay_.extra_ms *= 0.9f;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/stream_synchronization.h b/third_party/libwebrtc/video/stream_synchronization.h
new file mode 100644
index 0000000000..61073cb4b2
--- /dev/null
+++ b/third_party/libwebrtc/video/stream_synchronization.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_STREAM_SYNCHRONIZATION_H_
+#define VIDEO_STREAM_SYNCHRONIZATION_H_
+
+#include <stdint.h>
+
+#include "system_wrappers/include/rtp_to_ntp_estimator.h"
+
+namespace webrtc {
+
+class StreamSynchronization {
+ public:
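+  // RTCP-derived mapping from RTP timestamps to sender NTP time for one
+  // stream, plus the most recently received RTP timestamp and its local
+  // receive time.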
+ struct Measurements {
+ Measurements() : latest_receive_time_ms(0), latest_timestamp(0) {}
+ RtpToNtpEstimator rtp_to_ntp;
+ int64_t latest_receive_time_ms;
+ uint32_t latest_timestamp;
+ };
+
+ StreamSynchronization(uint32_t video_stream_id, uint32_t audio_stream_id);
+
+ bool ComputeDelays(int relative_delay_ms,
+ int current_audio_delay_ms,
+ int* total_audio_delay_target_ms,
+ int* total_video_delay_target_ms);
+
+  // On success, `relative_delay_ms` contains the number of milliseconds by
+  // which video is rendered later than audio. If audio is played back later
+  // than video, `relative_delay_ms` will be negative.
+ static bool ComputeRelativeDelay(const Measurements& audio_measurement,
+ const Measurements& video_measurement,
+ int* relative_delay_ms);
+
+ // Set target buffering delay. Audio and video will be delayed by at least
+ // `target_delay_ms`.
+ void SetTargetBufferingDelay(int target_delay_ms);
+
+ // Lowers the audio delay by 10%. Can be used to recover from errors.
+ void ReduceAudioDelay();
+
+ // Lowers the video delay by 10%. Can be used to recover from errors.
+ void ReduceVideoDelay();
+
+ uint32_t audio_stream_id() const { return audio_stream_id_; }
+ uint32_t video_stream_id() const { return video_stream_id_; }
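+
+  // Illustrative usage (a sketch; in this tree the caller is
+  // RtpStreamsSynchronizer), run periodically:
+  //   int relative_delay_ms;
+  //   if (StreamSynchronization::ComputeRelativeDelay(
+  //           audio_measurement, video_measurement, &relative_delay_ms)) {
+  //     // total_video_delay_ms is in/out; pass the current video delay.
+  //     sync.ComputeDelays(relative_delay_ms, current_audio_delay_ms,
+  //                        &total_audio_delay_ms, &total_video_delay_ms);
+  //   }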
+
+ private:
+ struct SynchronizationDelays {
+ int extra_ms = 0;
+ int last_ms = 0;
+ };
+
+ const uint32_t video_stream_id_;
+ const uint32_t audio_stream_id_;
+ SynchronizationDelays audio_delay_;
+ SynchronizationDelays video_delay_;
+ int base_target_delay_ms_;
+ int avg_diff_ms_;
+};
+} // namespace webrtc
+
+#endif // VIDEO_STREAM_SYNCHRONIZATION_H_
diff --git a/third_party/libwebrtc/video/stream_synchronization_unittest.cc b/third_party/libwebrtc/video/stream_synchronization_unittest.cc
new file mode 100644
index 0000000000..b733a1d2cf
--- /dev/null
+++ b/third_party/libwebrtc/video/stream_synchronization_unittest.cc
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stream_synchronization.h"
+
+#include <algorithm>
+
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/ntp_time.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kMaxChangeMs = 80; // From stream_synchronization.cc
+constexpr int kDefaultAudioFrequency = 8000;
+constexpr int kDefaultVideoFrequency = 90000;
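+// kFilterLength (4) from stream_synchronization.cc times two, since the
+// applied adjustment is half the filtered average difference.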
+constexpr int kSmoothingFilter = 4 * 2;
+} // namespace
+
+class StreamSynchronizationTest : public ::testing::Test {
+ public:
+ StreamSynchronizationTest()
+ : sync_(0, 0), clock_sender_(98765000), clock_receiver_(43210000) {}
+
+ protected:
+ // Generates the necessary RTCP measurements and RTP timestamps and computes
+ // the audio and video delays needed to get the two streams in sync.
+  // `audio_delay_ms` and `video_delay_ms` are the number of milliseconds
+  // after capture at which the frames are received.
+  // `current_audio_delay_ms` is the number of milliseconds by which audio is
+  // currently delayed at the receiver.
+ bool DelayedStreams(int audio_delay_ms,
+ int video_delay_ms,
+ int current_audio_delay_ms,
+ int* total_audio_delay_ms,
+ int* total_video_delay_ms) {
+ int audio_frequency =
+ static_cast<int>(kDefaultAudioFrequency * audio_clock_drift_ + 0.5);
+ int video_frequency =
+ static_cast<int>(kDefaultVideoFrequency * video_clock_drift_ + 0.5);
+
+ // Generate NTP/RTP timestamp pair for both streams corresponding to RTCP.
+ StreamSynchronization::Measurements audio;
+ StreamSynchronization::Measurements video;
+ NtpTime ntp_time = clock_sender_.CurrentNtpTime();
+ uint32_t rtp_timestamp =
+ clock_sender_.CurrentTime().ms() * audio_frequency / 1000;
+ EXPECT_EQ(audio.rtp_to_ntp.UpdateMeasurements(ntp_time, rtp_timestamp),
+ RtpToNtpEstimator::kNewMeasurement);
+ clock_sender_.AdvanceTimeMilliseconds(100);
+ clock_receiver_.AdvanceTimeMilliseconds(100);
+ ntp_time = clock_sender_.CurrentNtpTime();
+ rtp_timestamp = clock_sender_.CurrentTime().ms() * video_frequency / 1000;
+ EXPECT_EQ(video.rtp_to_ntp.UpdateMeasurements(ntp_time, rtp_timestamp),
+ RtpToNtpEstimator::kNewMeasurement);
+ clock_sender_.AdvanceTimeMilliseconds(900);
+ clock_receiver_.AdvanceTimeMilliseconds(900);
+ ntp_time = clock_sender_.CurrentNtpTime();
+ rtp_timestamp = clock_sender_.CurrentTime().ms() * audio_frequency / 1000;
+ EXPECT_EQ(audio.rtp_to_ntp.UpdateMeasurements(ntp_time, rtp_timestamp),
+ RtpToNtpEstimator::kNewMeasurement);
+ clock_sender_.AdvanceTimeMilliseconds(100);
+ clock_receiver_.AdvanceTimeMilliseconds(100);
+ ntp_time = clock_sender_.CurrentNtpTime();
+ rtp_timestamp = clock_sender_.CurrentTime().ms() * video_frequency / 1000;
+ EXPECT_EQ(video.rtp_to_ntp.UpdateMeasurements(ntp_time, rtp_timestamp),
+ RtpToNtpEstimator::kNewMeasurement);
+ clock_sender_.AdvanceTimeMilliseconds(900);
+ clock_receiver_.AdvanceTimeMilliseconds(900);
+
+ // Capture an audio and a video frame at the same time.
+ audio.latest_timestamp =
+ clock_sender_.CurrentTime().ms() * audio_frequency / 1000;
+ video.latest_timestamp =
+ clock_sender_.CurrentTime().ms() * video_frequency / 1000;
+
+ if (audio_delay_ms > video_delay_ms) {
+ // Audio later than video.
+ clock_receiver_.AdvanceTimeMilliseconds(video_delay_ms);
+ video.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
+ clock_receiver_.AdvanceTimeMilliseconds(audio_delay_ms - video_delay_ms);
+ audio.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
+ } else {
+ // Video later than audio.
+ clock_receiver_.AdvanceTimeMilliseconds(audio_delay_ms);
+ audio.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
+ clock_receiver_.AdvanceTimeMilliseconds(video_delay_ms - audio_delay_ms);
+ video.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
+ }
+
+ int relative_delay_ms;
+ EXPECT_TRUE(StreamSynchronization::ComputeRelativeDelay(
+ audio, video, &relative_delay_ms));
+ EXPECT_EQ(video_delay_ms - audio_delay_ms, relative_delay_ms);
+
+ return sync_.ComputeDelays(relative_delay_ms, current_audio_delay_ms,
+ total_audio_delay_ms, total_video_delay_ms);
+ }
+
+ // Simulate audio playback 300 ms after capture and video rendering 100 ms
+ // after capture. Verify that the correct extra delays are calculated for
+ // audio and video, and that they change correctly when we simulate that
+ // NetEQ or the VCM adds more delay to the streams.
+ void BothDelayedAudioLaterTest(int base_target_delay_ms) {
+ const int kAudioDelayMs = base_target_delay_ms + 300;
+ const int kVideoDelayMs = base_target_delay_ms + 100;
+ int current_audio_delay_ms = base_target_delay_ms;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay_ms;
+ int filtered_move = (kAudioDelayMs - kVideoDelayMs) / kSmoothingFilter;
+
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+
+ // Set new current delay.
+ current_audio_delay_ms = total_audio_delay_ms;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms + 2 * filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+
+ // Set new current delay.
+ current_audio_delay_ms = total_audio_delay_ms;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms + 3 * filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+
+ // Simulate that NetEQ introduces some audio delay.
+ const int kNeteqDelayIncrease = 50;
+ current_audio_delay_ms = base_target_delay_ms + kNeteqDelayIncrease;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ filtered_move = 3 * filtered_move +
+ (kNeteqDelayIncrease + kAudioDelayMs - kVideoDelayMs) /
+ kSmoothingFilter;
+ EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+
+ // Simulate that NetEQ reduces its delay.
+ const int kNeteqDelayDecrease = 10;
+ current_audio_delay_ms = base_target_delay_ms + kNeteqDelayDecrease;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ filtered_move =
+ filtered_move + (kNeteqDelayDecrease + kAudioDelayMs - kVideoDelayMs) /
+ kSmoothingFilter;
+ EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+ }
+
+ void BothDelayedVideoLaterTest(int base_target_delay_ms) {
+ const int kAudioDelayMs = base_target_delay_ms + 100;
+ const int kVideoDelayMs = base_target_delay_ms + 300;
+ int current_audio_delay_ms = base_target_delay_ms;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay_ms;
+
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ // The audio delay is not allowed to change more than this.
+ EXPECT_GE(base_target_delay_ms + kMaxChangeMs, total_audio_delay_ms);
+ int last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
+ current_audio_delay_ms,
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
+ current_audio_delay_ms,
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason reduced the delay.
+ current_audio_delay_ms = base_target_delay_ms + 10;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
+ current_audio_delay_ms,
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason significantly increased the delay.
+ current_audio_delay_ms = base_target_delay_ms + 350;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
+ current_audio_delay_ms,
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ }
+
+ int MaxAudioDelayChangeMs(int current_audio_delay_ms, int delay_ms) const {
+ int diff_ms = (delay_ms - current_audio_delay_ms) / kSmoothingFilter;
+ diff_ms = std::min(diff_ms, kMaxChangeMs);
+ diff_ms = std::max(diff_ms, -kMaxChangeMs);
+ return diff_ms;
+ }
+
+ StreamSynchronization sync_;
+ SimulatedClock clock_sender_;
+ SimulatedClock clock_receiver_;
+ double audio_clock_drift_ = 1.0;
+ double video_clock_drift_ = 1.0;
+};
+
+TEST_F(StreamSynchronizationTest, NoDelay) {
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_FALSE(DelayedStreams(/*audio_delay_ms=*/0, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ EXPECT_EQ(0, total_video_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, VideoDelayed) {
+ const int kAudioDelayMs = 200;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ // The delay is not allowed to change more than this.
+ EXPECT_EQ(kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
+
+ // Simulate 0 minimum delay in the VCM.
+ total_video_delay_ms = 0;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ EXPECT_EQ(2 * kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
+
+ // Simulate 0 minimum delay in the VCM.
+ total_video_delay_ms = 0;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ EXPECT_EQ(3 * kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, AudioDelayed) {
+ const int kVideoDelayMs = 200;
+ int current_audio_delay_ms = 0;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ // The delay is not allowed to change more than this.
+ EXPECT_EQ(kVideoDelayMs / kSmoothingFilter, total_audio_delay_ms);
+ int last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason reduced the delay.
+ current_audio_delay_ms = 10;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason significantly increased the delay.
+ current_audio_delay_ms = 350;
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, NoAudioIncomingUnboundedIncrease) {
+ // Test how audio delay can grow unbounded when audio stops coming in.
+  // This is handled by the caller of StreamSynchronization (for example,
+  // RtpStreamsSynchronizer), which stops updating the delays when audio
+  // samples stop coming in.
+ const int kVideoDelayMs = 300;
+ const int kAudioDelayMs = 100;
+ int current_audio_delay_ms = kAudioDelayMs;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ // The delay is not allowed to change more than this.
+ EXPECT_EQ((kVideoDelayMs - kAudioDelayMs) / kSmoothingFilter,
+ total_audio_delay_ms);
+ int last_total_audio_delay_ms = total_audio_delay_ms;
+
+ // Set new current audio delay: simulate audio samples are flowing in.
+ current_audio_delay_ms = total_audio_delay_ms;
+
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(1000);
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+
+  // Simulate no incoming audio by not updating the audio delay.
+  const int kSimulationSecs = 300;     // 5 minutes.
+  const int kMaxDeltaDelayMs = 10000;  // From stream_synchronization.cc.
+ for (auto time_secs = 0; time_secs < kSimulationSecs; time_secs++) {
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(1000);
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+
+ // Audio delay does not go above kMaxDeltaDelayMs.
+ EXPECT_EQ(std::min(kMaxDeltaDelayMs,
+ last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms,
+ kVideoDelayMs)),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+ }
+  // By now the audio delay has grown until capped at kMaxDeltaDelayMs.
+ EXPECT_EQ(kMaxDeltaDelayMs, last_total_audio_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) {
+ BothDelayedVideoLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterAudioClockDrift) {
+ audio_clock_drift_ = 1.05;
+ BothDelayedVideoLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterVideoClockDrift) {
+ video_clock_drift_ = 1.05;
+ BothDelayedVideoLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioLater) {
+ BothDelayedAudioLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDrift) {
+ audio_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDrift) {
+ video_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothEquallyDelayed) {
+ const int kDelayMs = 2000;
+ int current_audio_delay_ms = kDelayMs;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = kDelayMs;
+ // In sync, expect no change.
+ EXPECT_FALSE(DelayedStreams(kDelayMs, kDelayMs, current_audio_delay_ms,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ // Trigger another call with the same values, delay should not be modified.
+ total_video_delay_ms = kDelayMs;
+ EXPECT_FALSE(DelayedStreams(kDelayMs, kDelayMs, current_audio_delay_ms,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ // Change delay value, delay should not be modified.
+ const int kDelayMs2 = 5000;
+ current_audio_delay_ms = kDelayMs2;
+ total_video_delay_ms = kDelayMs2;
+ EXPECT_FALSE(DelayedStreams(kDelayMs2, kDelayMs2, current_audio_delay_ms,
+ &total_audio_delay_ms, &total_video_delay_ms));
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioLaterWithBaseDelay) {
+ const int kBaseTargetDelayMs = 3000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedAudioLaterTest(kBaseTargetDelayMs);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDriftWithBaseDelay) {
+ const int kBaseTargetDelayMs = 3000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ audio_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(kBaseTargetDelayMs);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDriftWithBaseDelay) {
+ const int kBaseTargetDelayMs = 3000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ video_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(kBaseTargetDelayMs);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterWithBaseDelay) {
+ const int kBaseTargetDelayMs = 2000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedVideoLaterTest(kBaseTargetDelayMs);
+}
+
+TEST_F(StreamSynchronizationTest,
+ BothDelayedVideoLaterAudioClockDriftWithBaseDelay) {
+ const int kBaseTargetDelayMs = 2000;
+ audio_clock_drift_ = 1.05;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedVideoLaterTest(kBaseTargetDelayMs);
+}
+
+TEST_F(StreamSynchronizationTest,
+ BothDelayedVideoLaterVideoClockDriftWithBaseDelay) {
+ const int kBaseTargetDelayMs = 2000;
+ video_clock_drift_ = 1.05;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedVideoLaterTest(kBaseTargetDelayMs);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/sv_loopback.cc b/third_party/libwebrtc/video/sv_loopback.cc
new file mode 100644
index 0000000000..af475ae4eb
--- /dev/null
+++ b/third_party/libwebrtc/video/sv_loopback.cc
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/types/optional.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video_quality_test_fixture.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_encode.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/run_test.h"
+#include "video/video_quality_test.h"
+
+// Flags for video.
+ABSL_FLAG(int, vwidth, 640, "Video width.");
+
+ABSL_FLAG(int, vheight, 480, "Video height.");
+
+ABSL_FLAG(int, vfps, 30, "Video frames per second.");
+
+ABSL_FLAG(int,
+ capture_device_index,
+ 0,
+ "Capture device to select for video stream");
+
+ABSL_FLAG(int, vtarget_bitrate, 400, "Video stream target bitrate in kbps.");
+
+ABSL_FLAG(int, vmin_bitrate, 100, "Video stream min bitrate in kbps.");
+
+ABSL_FLAG(int, vmax_bitrate, 2000, "Video stream max bitrate in kbps.");
+
+ABSL_FLAG(bool,
+ suspend_below_min_bitrate,
+ false,
+ "Suspends video below the configured min bitrate.");
+
+ABSL_FLAG(int,
+ vnum_temporal_layers,
+ 1,
+ "Number of temporal layers for video. Set to 1-4 to override.");
+
+ABSL_FLAG(int, vnum_streams, 0, "Number of video streams to show or analyze.");
+
+ABSL_FLAG(int,
+ vnum_spatial_layers,
+ 1,
+ "Number of video spatial layers to use.");
+
+ABSL_FLAG(int,
+ vinter_layer_pred,
+ 2,
+ "Video inter-layer prediction mode. "
+ "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");
+
+ABSL_FLAG(std::string,
+ vstream0,
+ "",
+ "Comma separated values describing VideoStream for video stream #0.");
+
+ABSL_FLAG(std::string,
+ vstream1,
+ "",
+ "Comma separated values describing VideoStream for video stream #1.");
+
+ABSL_FLAG(std::string,
+ vsl0,
+ "",
+ "Comma separated values describing SpatialLayer for video layer #0.");
+
+ABSL_FLAG(std::string,
+ vsl1,
+ "",
+ "Comma separated values describing SpatialLayer for video layer #1.");
+
+ABSL_FLAG(int,
+ vselected_tl,
+ -1,
+ "Temporal layer to show or analyze for screenshare. -1 to disable "
+ "filtering.");
+
+ABSL_FLAG(int,
+ vselected_stream,
+ 0,
+ "ID of the stream to show or analyze for screenshare."
+ "Set to the number of streams to show them all.");
+
+ABSL_FLAG(int,
+ vselected_sl,
+ -1,
+ "Spatial layer to show or analyze for screenshare. -1 to disable "
+ "filtering.");
+
+// Flags for screenshare.
+ABSL_FLAG(int,
+ min_transmit_bitrate,
+ 400,
+ "Min transmit bitrate incl. padding for screenshare.");
+
+ABSL_FLAG(int, swidth, 1850, "Screenshare width (crops source).");
+
+ABSL_FLAG(int, sheight, 1110, "Screenshare height (crops source).");
+
+ABSL_FLAG(int, sfps, 5, "Frames per second for screenshare.");
+
+ABSL_FLAG(int,
+ starget_bitrate,
+ 100,
+ "Screenshare stream target bitrate in kbps.");
+
+ABSL_FLAG(int, smin_bitrate, 100, "Screenshare stream min bitrate in kbps.");
+
+ABSL_FLAG(int, smax_bitrate, 2000, "Screenshare stream max bitrate in kbps.");
+
+ABSL_FLAG(int,
+ snum_temporal_layers,
+ 2,
+ "Number of temporal layers to use in screenshare.");
+
+ABSL_FLAG(int,
+ snum_streams,
+ 0,
+ "Number of screenshare streams to show or analyze.");
+
+ABSL_FLAG(int,
+ snum_spatial_layers,
+ 1,
+ "Number of screenshare spatial layers to use.");
+
+ABSL_FLAG(int,
+ sinter_layer_pred,
+ 0,
+ "Screenshare inter-layer prediction mode. "
+ "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");
+
+ABSL_FLAG(
+ std::string,
+ sstream0,
+ "",
+ "Comma separated values describing VideoStream for screenshare stream #0.");
+
+ABSL_FLAG(
+ std::string,
+ sstream1,
+ "",
+ "Comma separated values describing VideoStream for screenshare stream #1.");
+
+ABSL_FLAG(
+ std::string,
+ ssl0,
+ "",
+ "Comma separated values describing SpatialLayer for screenshare layer #0.");
+
+ABSL_FLAG(
+ std::string,
+ ssl1,
+ "",
+ "Comma separated values describing SpatialLayer for screenshare layer #1.");
+
+ABSL_FLAG(int,
+ sselected_tl,
+ -1,
+ "Temporal layer to show or analyze for screenshare. -1 to disable "
+ "filtering.");
+
+ABSL_FLAG(int,
+ sselected_stream,
+ 0,
+ "ID of the stream to show or analyze for screenshare."
+ "Set to the number of streams to show them all.");
+
+ABSL_FLAG(int,
+ sselected_sl,
+ -1,
+ "Spatial layer to show or analyze for screenshare. -1 to disable "
+ "filtering.");
+
+ABSL_FLAG(bool,
+ generate_slides,
+ false,
+ "Whether to use randomly generated slides or read them from files.");
+
+ABSL_FLAG(int,
+ slide_change_interval,
+ 10,
+ "Interval (in seconds) between simulated slide changes.");
+
+ABSL_FLAG(
+ int,
+ scroll_duration,
+ 0,
+ "Duration (in seconds) during which a slide will be scrolled into place.");
+
+ABSL_FLAG(std::string,
+ slides,
+ "",
+ "Comma-separated list of *.yuv files to display as slides.");
+
+// Flags common with screenshare and video loopback, with equal default values.
+ABSL_FLAG(int, start_bitrate, 600, "Call start bitrate in kbps.");
+
+ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");
+
+ABSL_FLAG(bool,
+ analyze_video,
+ false,
+ "Analyze video stream (if --duration is present)");
+
+ABSL_FLAG(bool,
+ analyze_screenshare,
+ false,
+ "Analyze screenshare stream (if --duration is present)");
+
+ABSL_FLAG(
+ int,
+ duration,
+ 0,
+ "Duration of the test in seconds. If 0, rendered will be shown instead.");
+
+ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");
+
+ABSL_FLAG(std::string,
+ graph_title,
+ "",
+ "If empty, title will be generated automatically.");
+
+ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");
+
+ABSL_FLAG(int,
+ avg_burst_loss_length,
+ -1,
+ "Average burst length of lost packets.");
+
+ABSL_FLAG(int,
+ link_capacity,
+ 0,
+ "Capacity (kbps) of the fake link. 0 means infinite.");
+
+ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");
+
+ABSL_FLAG(int,
+ avg_propagation_delay_ms,
+ 0,
+ "Average link propagation delay in ms.");
+
+ABSL_FLAG(std::string,
+ rtc_event_log_name,
+ "",
+ "Filename for rtc event log. Two files "
+ "with \"_send\" and \"_recv\" suffixes will be created. "
+ "Works only when --duration is set.");
+
+ABSL_FLAG(std::string,
+ rtp_dump_name,
+ "",
+ "Filename for dumped received RTP stream.");
+
+ABSL_FLAG(int,
+ std_propagation_delay_ms,
+ 0,
+ "Link propagation delay standard deviation in ms.");
+
+ABSL_FLAG(std::string,
+ encoded_frame_path,
+ "",
+ "The base path for encoded frame logs. Created files will have "
+ "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
+
+ABSL_FLAG(bool, logs, false, "Print logs to stderr.");
+
+ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation");
+
+ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");
+
+ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur");
+
+ABSL_FLAG(bool, use_ulpfec, false, "Use RED+ULPFEC forward error correction.");
+
+ABSL_FLAG(bool, use_flexfec, false, "Use FlexFEC forward error correction.");
+
+ABSL_FLAG(bool, audio, false, "Add audio stream");
+
+ABSL_FLAG(bool,
+ audio_video_sync,
+ false,
+ "Sync audio and video stream (no effect if"
+ " audio is false)");
+
+ABSL_FLAG(bool,
+ audio_dtx,
+ false,
+ "Enable audio DTX (no effect if audio is false)");
+
+ABSL_FLAG(bool, video, true, "Add video stream");
+
+ABSL_FLAG(
+ std::string,
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+ " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
+ "trials are separated by \"/\"");
+
+// Video-specific flags.
+ABSL_FLAG(std::string,
+ vclip,
+ "",
+ "Name of the clip to show. If empty, the camera is used. Use "
+ "\"Generator\" for chroma generator.");
+
+namespace webrtc {
+namespace {
+
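+// Maps the --{v,s}inter_layer_pred flag values (0: enabled, 1: disabled,
+// 2: enabled for key pictures) onto InterLayerPredMode.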
+InterLayerPredMode IntToInterLayerPredMode(int inter_layer_pred) {
+ if (inter_layer_pred == 0) {
+ return InterLayerPredMode::kOn;
+ } else if (inter_layer_pred == 1) {
+ return InterLayerPredMode::kOff;
+ } else {
+ RTC_DCHECK_EQ(inter_layer_pred, 2);
+ return InterLayerPredMode::kOnKeyPic;
+ }
+}
+
+size_t VideoWidth() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_vwidth));
+}
+
+size_t VideoHeight() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_vheight));
+}
+
+int VideoFps() {
+ return absl::GetFlag(FLAGS_vfps);
+}
+
+size_t GetCaptureDevice() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_capture_device_index));
+}
+
+int VideoTargetBitrateKbps() {
+ return absl::GetFlag(FLAGS_vtarget_bitrate);
+}
+
+int VideoMinBitrateKbps() {
+ return absl::GetFlag(FLAGS_vmin_bitrate);
+}
+
+int VideoMaxBitrateKbps() {
+ return absl::GetFlag(FLAGS_vmax_bitrate);
+}
+
+int VideoNumTemporalLayers() {
+ return absl::GetFlag(FLAGS_vnum_temporal_layers);
+}
+
+int VideoNumStreams() {
+ return absl::GetFlag(FLAGS_vnum_streams);
+}
+
+int VideoNumSpatialLayers() {
+ return absl::GetFlag(FLAGS_vnum_spatial_layers);
+}
+
+InterLayerPredMode VideoInterLayerPred() {
+ return IntToInterLayerPredMode(absl::GetFlag(FLAGS_vinter_layer_pred));
+}
+
+std::string VideoStream0() {
+ return absl::GetFlag(FLAGS_vstream0);
+}
+
+std::string VideoStream1() {
+ return absl::GetFlag(FLAGS_vstream1);
+}
+
+std::string VideoSL0() {
+ return absl::GetFlag(FLAGS_vsl0);
+}
+
+std::string VideoSL1() {
+ return absl::GetFlag(FLAGS_vsl1);
+}
+
+int VideoSelectedTL() {
+ return absl::GetFlag(FLAGS_vselected_tl);
+}
+
+int VideoSelectedStream() {
+ return absl::GetFlag(FLAGS_vselected_stream);
+}
+
+int VideoSelectedSL() {
+ return absl::GetFlag(FLAGS_vselected_sl);
+}
+
+int ScreenshareMinTransmitBitrateKbps() {
+ return absl::GetFlag(FLAGS_min_transmit_bitrate);
+}
+
+size_t ScreenshareWidth() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_swidth));
+}
+
+size_t ScreenshareHeight() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_sheight));
+}
+
+int ScreenshareFps() {
+ return absl::GetFlag(FLAGS_sfps);
+}
+
+int ScreenshareTargetBitrateKbps() {
+ return absl::GetFlag(FLAGS_starget_bitrate);
+}
+
+int ScreenshareMinBitrateKbps() {
+ return absl::GetFlag(FLAGS_smin_bitrate);
+}
+
+int ScreenshareMaxBitrateKbps() {
+ return absl::GetFlag(FLAGS_smax_bitrate);
+}
+
+int ScreenshareNumTemporalLayers() {
+ return absl::GetFlag(FLAGS_snum_temporal_layers);
+}
+
+int ScreenshareNumStreams() {
+ return absl::GetFlag(FLAGS_snum_streams);
+}
+
+int ScreenshareNumSpatialLayers() {
+ return absl::GetFlag(FLAGS_snum_spatial_layers);
+}
+
+InterLayerPredMode ScreenshareInterLayerPred() {
+ return IntToInterLayerPredMode(absl::GetFlag(FLAGS_sinter_layer_pred));
+}
+
+std::string ScreenshareStream0() {
+ return absl::GetFlag(FLAGS_sstream0);
+}
+
+std::string ScreenshareStream1() {
+ return absl::GetFlag(FLAGS_sstream1);
+}
+
+std::string ScreenshareSL0() {
+ return absl::GetFlag(FLAGS_ssl0);
+}
+
+std::string ScreenshareSL1() {
+ return absl::GetFlag(FLAGS_ssl1);
+}
+
+int ScreenshareSelectedTL() {
+ return absl::GetFlag(FLAGS_sselected_tl);
+}
+
+int ScreenshareSelectedStream() {
+ return absl::GetFlag(FLAGS_sselected_stream);
+}
+
+int ScreenshareSelectedSL() {
+ return absl::GetFlag(FLAGS_sselected_sl);
+}
+
+bool GenerateSlides() {
+ return absl::GetFlag(FLAGS_generate_slides);
+}
+
+int SlideChangeInterval() {
+ return absl::GetFlag(FLAGS_slide_change_interval);
+}
+
+int ScrollDuration() {
+ return absl::GetFlag(FLAGS_scroll_duration);
+}
+
+std::vector<std::string> Slides() {
+ std::vector<std::string> slides;
+ std::string slides_list = absl::GetFlag(FLAGS_slides);
+ rtc::tokenize(slides_list, ',', &slides);
+ return slides;
+}
+
+int StartBitrateKbps() {
+ return absl::GetFlag(FLAGS_start_bitrate);
+}
+
+std::string Codec() {
+ return absl::GetFlag(FLAGS_codec);
+}
+
+bool AnalyzeVideo() {
+ return absl::GetFlag(FLAGS_analyze_video);
+}
+
+bool AnalyzeScreenshare() {
+ return absl::GetFlag(FLAGS_analyze_screenshare);
+}
+
+int DurationSecs() {
+ return absl::GetFlag(FLAGS_duration);
+}
+
+std::string OutputFilename() {
+ return absl::GetFlag(FLAGS_output_filename);
+}
+
+std::string GraphTitle() {
+ return absl::GetFlag(FLAGS_graph_title);
+}
+
+int LossPercent() {
+ return absl::GetFlag(FLAGS_loss_percent);
+}
+
+int AvgBurstLossLength() {
+ return absl::GetFlag(FLAGS_avg_burst_loss_length);
+}
+
+int LinkCapacityKbps() {
+ return absl::GetFlag(FLAGS_link_capacity);
+}
+
+int QueueSize() {
+ return absl::GetFlag(FLAGS_queue_size);
+}
+
+int AvgPropagationDelayMs() {
+ return absl::GetFlag(FLAGS_avg_propagation_delay_ms);
+}
+
+std::string RtcEventLogName() {
+ return absl::GetFlag(FLAGS_rtc_event_log_name);
+}
+
+std::string RtpDumpName() {
+ return absl::GetFlag(FLAGS_rtp_dump_name);
+}
+
+int StdPropagationDelayMs() {
+ return absl::GetFlag(FLAGS_std_propagation_delay_ms);
+}
+
+std::string EncodedFramePath() {
+ return absl::GetFlag(FLAGS_encoded_frame_path);
+}
+
+std::string VideoClip() {
+ return absl::GetFlag(FLAGS_vclip);
+}
+
+} // namespace
+
+void Loopback() {
+ int camera_idx, screenshare_idx;
+ RTC_CHECK(!(AnalyzeScreenshare() && AnalyzeVideo()))
+ << "Select only one of video or screenshare.";
+ RTC_CHECK(!DurationSecs() || AnalyzeScreenshare() || AnalyzeVideo())
+ << "If duration is set, exactly one of analyze_* flags should be set.";
+  // Put the analyzed stream first; default to the camera feed if nothing is
+  // selected.
+ if (AnalyzeVideo() || !AnalyzeScreenshare()) {
+ camera_idx = 0;
+ screenshare_idx = 1;
+ } else {
+ camera_idx = 1;
+ screenshare_idx = 0;
+ }
+
+ BuiltInNetworkBehaviorConfig pipe_config;
+ pipe_config.loss_percent = LossPercent();
+ pipe_config.avg_burst_loss_length = AvgBurstLossLength();
+ pipe_config.link_capacity_kbps = LinkCapacityKbps();
+ pipe_config.queue_length_packets = QueueSize();
+ pipe_config.queue_delay_ms = AvgPropagationDelayMs();
+ pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
+ pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);
+
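+  // The call-level bitrate limits cover both streams, so the per-stream
+  // minima and maxima are summed; flag values are in kbps, the config in bps.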
+ BitrateConstraints call_bitrate_config;
+ call_bitrate_config.min_bitrate_bps =
+ (ScreenshareMinBitrateKbps() + VideoMinBitrateKbps()) * 1000;
+ call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
+ call_bitrate_config.max_bitrate_bps =
+ (ScreenshareMaxBitrateKbps() + VideoMaxBitrateKbps()) * 1000;
+
+ VideoQualityTest::Params params;
+ params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe);
+ params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor);
+ params.call.call_bitrate_config = call_bitrate_config;
+ params.call.dual_video = true;
+ params.video[screenshare_idx].enabled = true;
+ params.video[screenshare_idx].width = ScreenshareWidth();
+ params.video[screenshare_idx].height = ScreenshareHeight();
+ params.video[screenshare_idx].fps = ScreenshareFps();
+ params.video[screenshare_idx].min_bitrate_bps =
+ ScreenshareMinBitrateKbps() * 1000;
+ params.video[screenshare_idx].target_bitrate_bps =
+ ScreenshareTargetBitrateKbps() * 1000;
+ params.video[screenshare_idx].max_bitrate_bps =
+ ScreenshareMaxBitrateKbps() * 1000;
+ params.video[screenshare_idx].codec = Codec();
+ params.video[screenshare_idx].num_temporal_layers =
+ ScreenshareNumTemporalLayers();
+ params.video[screenshare_idx].selected_tl = ScreenshareSelectedTL();
+ params.video[screenshare_idx].min_transmit_bps =
+ ScreenshareMinTransmitBitrateKbps() * 1000;
+ params.video[camera_idx].enabled = absl::GetFlag(FLAGS_video);
+ params.video[camera_idx].width = VideoWidth();
+ params.video[camera_idx].height = VideoHeight();
+ params.video[camera_idx].fps = VideoFps();
+ params.video[camera_idx].min_bitrate_bps = VideoMinBitrateKbps() * 1000;
+ params.video[camera_idx].target_bitrate_bps = VideoTargetBitrateKbps() * 1000;
+ params.video[camera_idx].max_bitrate_bps = VideoMaxBitrateKbps() * 1000;
+ params.video[camera_idx].suspend_below_min_bitrate =
+ absl::GetFlag(FLAGS_suspend_below_min_bitrate);
+ params.video[camera_idx].codec = Codec();
+ params.video[camera_idx].num_temporal_layers = VideoNumTemporalLayers();
+ params.video[camera_idx].selected_tl = VideoSelectedTL();
+ params.video[camera_idx].ulpfec = absl::GetFlag(FLAGS_use_ulpfec);
+ params.video[camera_idx].flexfec = absl::GetFlag(FLAGS_use_flexfec);
+ params.video[camera_idx].clip_path = VideoClip();
+ params.video[camera_idx].capture_device_index = GetCaptureDevice();
+ params.audio.enabled = absl::GetFlag(FLAGS_audio);
+ params.audio.sync_video = absl::GetFlag(FLAGS_audio_video_sync);
+ params.audio.dtx = absl::GetFlag(FLAGS_audio_dtx);
+ params.logging.rtc_event_log_name = RtcEventLogName();
+ params.logging.rtp_dump_name = RtpDumpName();
+ params.logging.encoded_frame_base_path = EncodedFramePath();
+ params.analyzer.test_label = "dual_streams";
+ params.analyzer.test_durations_secs = DurationSecs();
+ params.analyzer.graph_data_output_filename = OutputFilename();
+ params.analyzer.graph_title = GraphTitle();
+ params.config = pipe_config;
+
+ params.screenshare[camera_idx].enabled = false;
+ params.screenshare[screenshare_idx].enabled = true;
+ params.screenshare[screenshare_idx].generate_slides = GenerateSlides();
+ params.screenshare[screenshare_idx].slide_change_interval =
+ SlideChangeInterval();
+ params.screenshare[screenshare_idx].scroll_duration = ScrollDuration();
+ params.screenshare[screenshare_idx].slides = Slides();
+
+ if (VideoNumStreams() > 1 && VideoStream0().empty() &&
+ VideoStream1().empty()) {
+ params.ss[camera_idx].infer_streams = true;
+ }
+
+ if (ScreenshareNumStreams() > 1 && ScreenshareStream0().empty() &&
+ ScreenshareStream1().empty()) {
+ params.ss[screenshare_idx].infer_streams = true;
+ }
+
+ std::vector<std::string> stream_descriptors;
+ stream_descriptors.push_back(ScreenshareStream0());
+ stream_descriptors.push_back(ScreenshareStream1());
+ std::vector<std::string> SL_descriptors;
+ SL_descriptors.push_back(ScreenshareSL0());
+ SL_descriptors.push_back(ScreenshareSL1());
+ VideoQualityTest::FillScalabilitySettings(
+ &params, screenshare_idx, stream_descriptors, ScreenshareNumStreams(),
+ ScreenshareSelectedStream(), ScreenshareNumSpatialLayers(),
+ ScreenshareSelectedSL(), ScreenshareInterLayerPred(), SL_descriptors);
+
+ stream_descriptors.clear();
+ stream_descriptors.push_back(VideoStream0());
+ stream_descriptors.push_back(VideoStream1());
+ SL_descriptors.clear();
+ SL_descriptors.push_back(VideoSL0());
+ SL_descriptors.push_back(VideoSL1());
+ VideoQualityTest::FillScalabilitySettings(
+ &params, camera_idx, stream_descriptors, VideoNumStreams(),
+ VideoSelectedStream(), VideoNumSpatialLayers(), VideoSelectedSL(),
+ VideoInterLayerPred(), SL_descriptors);
+
+ auto fixture = std::make_unique<VideoQualityTest>(nullptr);
+ if (DurationSecs()) {
+ fixture->RunWithAnalyzer(params);
+ } else {
+ fixture->RunWithRenderers(params);
+ }
+}
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ absl::ParseCommandLine(argc, argv);
+
+ rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));
+
+ // InitFieldTrialsFromString stores the char*, so the char array must outlive
+ // the application.
+ const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
+ webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());
+
+ webrtc::test::RunTest(webrtc::Loopback);
+ return 0;
+}
diff --git a/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.cc b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.cc
new file mode 100644
index 0000000000..6dd7b47f17
--- /dev/null
+++ b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/task_queue_frame_decode_scheduler.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+TaskQueueFrameDecodeScheduler::TaskQueueFrameDecodeScheduler(
+ Clock* clock,
+ TaskQueueBase* const bookkeeping_queue)
+ : clock_(clock), bookkeeping_queue_(bookkeeping_queue) {
+ RTC_DCHECK(clock_);
+ RTC_DCHECK(bookkeeping_queue_);
+}
+
+TaskQueueFrameDecodeScheduler::~TaskQueueFrameDecodeScheduler() {
+ RTC_DCHECK(stopped_);
+ RTC_DCHECK(!scheduled_rtp_) << "Outstanding scheduled rtp=" << *scheduled_rtp_
+ << ". Call CancelOutstanding before destruction.";
+}
+
+void TaskQueueFrameDecodeScheduler::ScheduleFrame(
+ uint32_t rtp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameReleaseCallback cb) {
+ // Mozilla modification, until https://bugs.webrtc.org/14944 is fixed
+ //RTC_DCHECK(!stopped_) << "Can not schedule frames after stopped.";
+ RTC_DCHECK(!scheduled_rtp_.has_value())
+ << "Can not schedule two frames for release at the same time.";
+ RTC_DCHECK(cb);
+ scheduled_rtp_ = rtp;
+
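+  // Clamp to zero so that frames whose latest decode time has already passed
+  // are released immediately.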
+ TimeDelta wait = std::max(
+ TimeDelta::Zero(), schedule.latest_decode_time - clock_->CurrentTime());
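+  // The release task is tied to `task_safety_`, so it becomes a no-op if the
+  // scheduler is destroyed before the delay elapses.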
+ bookkeeping_queue_->PostDelayedHighPrecisionTask(
+ SafeTask(task_safety_.flag(),
+ [this, rtp, schedule, cb = std::move(cb)]() mutable {
+ RTC_DCHECK_RUN_ON(bookkeeping_queue_);
+                 // If the next frame's rtp timestamp has changed since this
+                 // task was posted, this scheduled release should be skipped.
+ if (scheduled_rtp_ != rtp)
+ return;
+ scheduled_rtp_ = absl::nullopt;
+ std::move(cb)(rtp, schedule.render_time);
+ }),
+ wait);
+}
+
+void TaskQueueFrameDecodeScheduler::CancelOutstanding() {
+ scheduled_rtp_ = absl::nullopt;
+}
+
+absl::optional<uint32_t>
+TaskQueueFrameDecodeScheduler::ScheduledRtpTimestamp() {
+ return scheduled_rtp_;
+}
+
+void TaskQueueFrameDecodeScheduler::Stop() {
+ CancelOutstanding();
+ stopped_ = true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.h b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.h
new file mode 100644
index 0000000000..69c6dae63d
--- /dev/null
+++ b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_TASK_QUEUE_FRAME_DECODE_SCHEDULER_H_
+#define VIDEO_TASK_QUEUE_FRAME_DECODE_SCHEDULER_H_
+
+#include "video/frame_decode_scheduler.h"
+
+namespace webrtc {
+
+// An implementation of FrameDecodeScheduler that is based on TaskQueues. This
+// is the default implementation for general use.
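+// Frame release callbacks are posted as delayed tasks on the bookkeeping
+// queue and therefore run on that queue.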
+class TaskQueueFrameDecodeScheduler : public FrameDecodeScheduler {
+ public:
+ TaskQueueFrameDecodeScheduler(Clock* clock,
+ TaskQueueBase* const bookkeeping_queue);
+ ~TaskQueueFrameDecodeScheduler() override;
+ TaskQueueFrameDecodeScheduler(const TaskQueueFrameDecodeScheduler&) = delete;
+ TaskQueueFrameDecodeScheduler& operator=(
+ const TaskQueueFrameDecodeScheduler&) = delete;
+
+ // FrameDecodeScheduler implementation.
+ absl::optional<uint32_t> ScheduledRtpTimestamp() override;
+ void ScheduleFrame(uint32_t rtp,
+ FrameDecodeTiming::FrameSchedule schedule,
+ FrameReleaseCallback cb) override;
+ void CancelOutstanding() override;
+ void Stop() override;
+
+ private:
+ Clock* const clock_;
+ TaskQueueBase* const bookkeeping_queue_;
+
+ absl::optional<uint32_t> scheduled_rtp_;
+ ScopedTaskSafetyDetached task_safety_;
+ bool stopped_ = false;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_TASK_QUEUE_FRAME_DECODE_SCHEDULER_H_
diff --git a/third_party/libwebrtc/video/task_queue_frame_decode_scheduler_gn/moz.build b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler_gn/moz.build
new file mode 100644
index 0000000000..e9e33818de
--- /dev/null
+++ b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/task_queue_frame_decode_scheduler.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("task_queue_frame_decode_scheduler_gn")
diff --git a/third_party/libwebrtc/video/task_queue_frame_decode_scheduler_unittest.cc b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler_unittest.cc
new file mode 100644
index 0000000000..20258c6382
--- /dev/null
+++ b/third_party/libwebrtc/video/task_queue_frame_decode_scheduler_unittest.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/task_queue_frame_decode_scheduler.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::MockFunction;
+using ::testing::Optional;
+
+TEST(TaskQueueFrameDecodeSchedulerTest, FrameYieldedAfterSpecifiedPeriod) {
+ GlobalSimulatedTimeController time_controller_(Timestamp::Seconds(2000));
+ TaskQueueFrameDecodeScheduler scheduler(time_controller_.GetClock(),
+ time_controller_.GetMainThread());
+ constexpr TimeDelta decode_delay = TimeDelta::Millis(5);
+
+ const Timestamp now = time_controller_.GetClock()->CurrentTime();
+ const uint32_t rtp = 90000;
+ const Timestamp render_time = now + TimeDelta::Millis(15);
+ FrameDecodeTiming::FrameSchedule schedule = {
+ .latest_decode_time = now + decode_delay, .render_time = render_time};
+
+ MockFunction<void(uint32_t, Timestamp)> ready_cb;
+ scheduler.ScheduleFrame(rtp, schedule, ready_cb.AsStdFunction());
+ EXPECT_CALL(ready_cb, Call(_, _)).Times(0);
+ EXPECT_THAT(scheduler.ScheduledRtpTimestamp(), Optional(rtp));
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ // Check that `ready_cb` has not been invoked yet.
+ ::testing::Mock::VerifyAndClearExpectations(&ready_cb);
+
+ EXPECT_CALL(ready_cb, Call(rtp, render_time)).Times(1);
+ time_controller_.AdvanceTime(decode_delay);
+
+ scheduler.Stop();
+}
+
+TEST(TaskQueueFrameDecodeSchedulerTest, NegativeDecodeDelayIsRoundedToZero) {
+ GlobalSimulatedTimeController time_controller_(Timestamp::Seconds(2000));
+ TaskQueueFrameDecodeScheduler scheduler(time_controller_.GetClock(),
+ time_controller_.GetMainThread());
+ constexpr TimeDelta decode_delay = TimeDelta::Millis(-5);
+ const Timestamp now = time_controller_.GetClock()->CurrentTime();
+ const uint32_t rtp = 90000;
+ const Timestamp render_time = now + TimeDelta::Millis(15);
+ FrameDecodeTiming::FrameSchedule schedule = {
+ .latest_decode_time = now + decode_delay, .render_time = render_time};
+
+ MockFunction<void(uint32_t, Timestamp)> ready_cb;
+ EXPECT_CALL(ready_cb, Call(rtp, render_time)).Times(1);
+ scheduler.ScheduleFrame(rtp, schedule, ready_cb.AsStdFunction());
+ EXPECT_THAT(scheduler.ScheduledRtpTimestamp(), Optional(rtp));
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ scheduler.Stop();
+}
+
+TEST(TaskQueueFrameDecodeSchedulerTest, CancelOutstanding) {
+ GlobalSimulatedTimeController time_controller_(Timestamp::Seconds(2000));
+ TaskQueueFrameDecodeScheduler scheduler(time_controller_.GetClock(),
+ time_controller_.GetMainThread());
+ constexpr TimeDelta decode_delay = TimeDelta::Millis(50);
+ const Timestamp now = time_controller_.GetClock()->CurrentTime();
+ const uint32_t rtp = 90000;
+ FrameDecodeTiming::FrameSchedule schedule = {
+ .latest_decode_time = now + decode_delay,
+ .render_time = now + TimeDelta::Millis(75)};
+
+ MockFunction<void(uint32_t, Timestamp)> ready_cb;
+ EXPECT_CALL(ready_cb, Call).Times(0);
+ scheduler.ScheduleFrame(rtp, schedule, ready_cb.AsStdFunction());
+ EXPECT_THAT(scheduler.ScheduledRtpTimestamp(), Optional(rtp));
+ time_controller_.AdvanceTime(decode_delay / 2);
+ EXPECT_THAT(scheduler.ScheduledRtpTimestamp(), Optional(rtp));
+ scheduler.CancelOutstanding();
+ EXPECT_THAT(scheduler.ScheduledRtpTimestamp(), Eq(absl::nullopt));
+ time_controller_.AdvanceTime(decode_delay / 2);
+
+ scheduler.Stop();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/test/mock_video_stream_encoder.h b/third_party/libwebrtc/video/test/mock_video_stream_encoder.h
new file mode 100644
index 0000000000..946f45cc76
--- /dev/null
+++ b/third_party/libwebrtc/video/test/mock_video_stream_encoder.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_
+#define VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_
+
+#include <vector>
+
+#include "test/gmock.h"
+#include "video/video_stream_encoder_interface.h"
+
+namespace webrtc {
+
+class MockVideoStreamEncoder : public VideoStreamEncoderInterface {
+ public:
+ MOCK_METHOD(void,
+ AddAdaptationResource,
+ (rtc::scoped_refptr<Resource>),
+ (override));
+ MOCK_METHOD(std::vector<rtc::scoped_refptr<Resource>>,
+ GetAdaptationResources,
+ (),
+ (override));
+ MOCK_METHOD(void,
+ SetSource,
+ (rtc::VideoSourceInterface<VideoFrame>*,
+ const DegradationPreference&),
+ (override));
+ MOCK_METHOD(void, SetSink, (EncoderSink*, bool), (override));
+ MOCK_METHOD(void, SetStartBitrate, (int), (override));
+ MOCK_METHOD(void,
+ SendKeyFrame,
+ (const std::vector<VideoFrameType>&),
+ (override));
+ MOCK_METHOD(void,
+ OnLossNotification,
+ (const VideoEncoder::LossNotification&),
+ (override));
+ MOCK_METHOD(void,
+ OnBitrateUpdated,
+ (DataRate, DataRate, DataRate, uint8_t, int64_t, double),
+ (override));
+ MOCK_METHOD(void,
+ SetFecControllerOverride,
+ (FecControllerOverride*),
+ (override));
+ MOCK_METHOD(void, Stop, (), (override));
+
+ MOCK_METHOD(void,
+ MockedConfigureEncoder,
+ (const VideoEncoderConfig&, size_t));
+ MOCK_METHOD(void,
+ MockedConfigureEncoder,
+ (const VideoEncoderConfig&, size_t, SetParametersCallback));
+  // gMock's generated methods copy their arguments, which VideoEncoderConfig
+  // does not allow, so we can't mock ConfigureEncoder directly.
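+  // Tests should therefore set their expectations on the shim, e.g.
+  //   EXPECT_CALL(encoder, MockedConfigureEncoder(_, _));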
+ void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length) {
+ MockedConfigureEncoder(config, max_data_payload_length);
+ }
+ void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ SetParametersCallback) {
+ MockedConfigureEncoder(config, max_data_payload_length);
+ }
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_
diff --git a/third_party/libwebrtc/video/transport_adapter.cc b/third_party/libwebrtc/video/transport_adapter.cc
new file mode 100644
index 0000000000..5d6ccc8819
--- /dev/null
+++ b/third_party/libwebrtc/video/transport_adapter.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/transport_adapter.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace internal {
+
+TransportAdapter::TransportAdapter(Transport* transport)
+ : transport_(transport), enabled_(false) {
+ RTC_DCHECK(nullptr != transport);
+}
+
+TransportAdapter::~TransportAdapter() = default;
+
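+// Packets are silently dropped while the adapter is disabled. `enabled_` is
+// atomic, so Enable()/Disable() may safely race with the send path.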
+bool TransportAdapter::SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) {
+ if (!enabled_.load())
+ return false;
+
+ return transport_->SendRtp(packet, length, options);
+}
+
+bool TransportAdapter::SendRtcp(const uint8_t* packet, size_t length) {
+ if (!enabled_.load())
+ return false;
+
+ return transport_->SendRtcp(packet, length);
+}
+
+void TransportAdapter::Enable() {
+ enabled_.store(true);
+}
+
+void TransportAdapter::Disable() {
+ enabled_.store(false);
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/transport_adapter.h b/third_party/libwebrtc/video/transport_adapter.h
new file mode 100644
index 0000000000..95dd308601
--- /dev/null
+++ b/third_party/libwebrtc/video/transport_adapter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_TRANSPORT_ADAPTER_H_
+#define VIDEO_TRANSPORT_ADAPTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+
+#include "api/call/transport.h"
+
+namespace webrtc {
+namespace internal {
+
+class TransportAdapter : public Transport {
+ public:
+ explicit TransportAdapter(Transport* transport);
+ ~TransportAdapter() override;
+
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override;
+ bool SendRtcp(const uint8_t* packet, size_t length) override;
+
+ void Enable();
+ void Disable();
+
+ private:
+ Transport* transport_;
+ std::atomic<bool> enabled_;
+};
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_TRANSPORT_ADAPTER_H_
diff --git a/third_party/libwebrtc/video/unique_timestamp_counter.cc b/third_party/libwebrtc/video/unique_timestamp_counter.cc
new file mode 100644
index 0000000000..14cc039ec9
--- /dev/null
+++ b/third_party/libwebrtc/video/unique_timestamp_counter.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/unique_timestamp_counter.h"
+
+#include <cstdint>
+#include <memory>
+#include <set>
+
+namespace webrtc {
+namespace {
+
+constexpr int kMaxHistory = 1000;
+
+} // namespace
+
+UniqueTimestampCounter::UniqueTimestampCounter()
+ : latest_(std::make_unique<uint32_t[]>(kMaxHistory)) {}
+
+void UniqueTimestampCounter::Add(uint32_t value) {
+ if (value == last_ || !search_index_.insert(value).second) {
+ // Already known.
+ return;
+ }
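+  // Once the history is full, overwrite the oldest entry in the circular
+  // buffer and drop it from the search index.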
+ int index = unique_seen_ % kMaxHistory;
+ if (unique_seen_ >= kMaxHistory) {
+ search_index_.erase(latest_[index]);
+ }
+ latest_[index] = value;
+ last_ = value;
+ ++unique_seen_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/unique_timestamp_counter.h b/third_party/libwebrtc/video/unique_timestamp_counter.h
new file mode 100644
index 0000000000..5dfb758bce
--- /dev/null
+++ b/third_party/libwebrtc/video/unique_timestamp_counter.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_UNIQUE_TIMESTAMP_COUNTER_H_
+#define VIDEO_UNIQUE_TIMESTAMP_COUNTER_H_
+
+#include <cstdint>
+#include <memory>
+#include <set>
+
+namespace webrtc {
+
+// Counts the number of uniquely seen frames (aka pictures, aka temporal
+// units), identified by their RTP timestamp.
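+// Example:
+//   UniqueTimestampCounter counter;
+//   counter.Add(3000);
+//   counter.Add(3000);  // Duplicate, ignored.
+//   counter.GetUniqueSeen();  // Returns 1.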
+class UniqueTimestampCounter {
+ public:
+ UniqueTimestampCounter();
+ UniqueTimestampCounter(const UniqueTimestampCounter&) = delete;
+ UniqueTimestampCounter& operator=(const UniqueTimestampCounter&) = delete;
+ ~UniqueTimestampCounter() = default;
+
+ void Add(uint32_t timestamp);
+  // Returns the number of distinct `timestamp` values passed to Add().
+ int GetUniqueSeen() const { return unique_seen_; }
+
+ private:
+ int unique_seen_ = 0;
+  // Stores the most recently seen unique values for quick lookup.
+ std::set<uint32_t> search_index_;
+  // The same unique values, kept in a circular buffer in insertion order.
+ std::unique_ptr<uint32_t[]> latest_;
+  // Last inserted value, cached as a fast-path duplicate check.
+ int64_t last_ = -1;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_UNIQUE_TIMESTAMP_COUNTER_H_
diff --git a/third_party/libwebrtc/video/unique_timestamp_counter_gn/moz.build b/third_party/libwebrtc/video/unique_timestamp_counter_gn/moz.build
new file mode 100644
index 0000000000..de254b0f67
--- /dev/null
+++ b/third_party/libwebrtc/video/unique_timestamp_counter_gn/moz.build
@@ -0,0 +1,217 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/unique_timestamp_counter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("unique_timestamp_counter_gn")
diff --git a/third_party/libwebrtc/video/unique_timestamp_counter_unittest.cc b/third_party/libwebrtc/video/unique_timestamp_counter_unittest.cc
new file mode 100644
index 0000000000..b703e84576
--- /dev/null
+++ b/third_party/libwebrtc/video/unique_timestamp_counter_unittest.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/unique_timestamp_counter.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+TEST(UniqueTimestampCounterTest, InitiallyZero) {
+ UniqueTimestampCounter counter;
+ EXPECT_EQ(counter.GetUniqueSeen(), 0);
+}
+
+TEST(UniqueTimestampCounterTest, CountsUniqueValues) {
+ UniqueTimestampCounter counter;
+ counter.Add(100);
+ counter.Add(100);
+ counter.Add(200);
+ counter.Add(150);
+ counter.Add(100);
+ EXPECT_EQ(counter.GetUniqueSeen(), 3);
+}
+
+TEST(UniqueTimestampCounterTest, ForgetsOldValuesAfter1000NewValues) {
+ const int kNumValues = 1500;
+ const int kMaxHistory = 1000;
+ const uint32_t value = 0xFFFFFFF0;
+ UniqueTimestampCounter counter;
+ for (int i = 0; i < kNumValues; ++i) {
+ counter.Add(value + 10 * i);
+ }
+ ASSERT_EQ(counter.GetUniqueSeen(), kNumValues);
+  // Re-adding slightly old values does not affect the unique count.
+ for (int i = kNumValues - kMaxHistory; i < kNumValues; ++i) {
+ counter.Add(value + 10 * i);
+ }
+ EXPECT_EQ(counter.GetUniqueSeen(), kNumValues);
+  // A very old value (already evicted from history) is treated as unique
+  // again.
+ counter.Add(value);
+ EXPECT_EQ(counter.GetUniqueSeen(), kNumValues + 1);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_analyzer.cc b/third_party/libwebrtc/video/video_analyzer.cc
new file mode 100644
index 0000000000..6d70762f34
--- /dev/null
+++ b/third_party/libwebrtc/video/video_analyzer.cc
@@ -0,0 +1,1047 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_analyzer.h"
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/strings/string_view.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/metrics/metric.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/cpu_time.h"
+#include "rtc_base/memory_usage.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/cpu_info.h"
+#include "test/call_test.h"
+#include "test/testsupport/file_utils.h"
+#include "test/testsupport/frame_writer.h"
+#include "test/testsupport/test_artifacts.h"
+
+ABSL_FLAG(bool,
+ save_worst_frame,
+ false,
+ "Enable saving a frame with the lowest PSNR to a jpeg file in the "
+ "test_artifacts_dir");
+
+namespace webrtc {
+namespace {
+
+using ::webrtc::test::GetGlobalMetricsLogger;
+using ::webrtc::test::ImprovementDirection;
+using ::webrtc::test::Metric;
+using ::webrtc::test::Unit;
+
+constexpr TimeDelta kSendStatsPollingInterval = TimeDelta::Seconds(1);
+constexpr size_t kMaxComparisons = 10;
+// How often the keep-alive message is printed.
+constexpr TimeDelta kKeepAliveInterval = TimeDelta::Seconds(30);
+// Interval between checking that the test is over.
+constexpr TimeDelta kProbingInterval = TimeDelta::Millis(500);
+constexpr int kKeepAliveIntervalIterations =
+ kKeepAliveInterval.ms() / kProbingInterval.ms();
+
+bool IsFlexfec(int payload_type) {
+ return payload_type == test::CallTest::kFlexfecPayloadType;
+}
+
+} // namespace
+
+VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport,
+ const std::string& test_label,
+ double avg_psnr_threshold,
+ double avg_ssim_threshold,
+ int duration_frames,
+ TimeDelta test_duration,
+ FILE* graph_data_output_file,
+ const std::string& graph_title,
+ uint32_t ssrc_to_analyze,
+ uint32_t rtx_ssrc_to_analyze,
+ size_t selected_stream,
+ int selected_sl,
+ int selected_tl,
+ bool is_quick_test_enabled,
+ Clock* clock,
+ std::string rtp_dump_name,
+ TaskQueueBase* task_queue)
+ : transport_(transport),
+ receiver_(nullptr),
+ call_(nullptr),
+ send_stream_(nullptr),
+ receive_stream_(nullptr),
+ audio_receive_stream_(nullptr),
+ captured_frame_forwarder_(this, clock, duration_frames, test_duration),
+ test_label_(test_label),
+ graph_data_output_file_(graph_data_output_file),
+ graph_title_(graph_title),
+ ssrc_to_analyze_(ssrc_to_analyze),
+ rtx_ssrc_to_analyze_(rtx_ssrc_to_analyze),
+ selected_stream_(selected_stream),
+ selected_sl_(selected_sl),
+ selected_tl_(selected_tl),
+ mean_decode_time_ms_(0.0),
+ freeze_count_(0),
+ total_freezes_duration_ms_(0),
+ total_inter_frame_delay_(0),
+ total_squared_inter_frame_delay_(0),
+ decode_frame_rate_(0),
+ render_frame_rate_(0),
+ last_fec_bytes_(0),
+ frames_to_process_(duration_frames),
+ test_end_(clock->CurrentTime() + test_duration),
+ frames_recorded_(0),
+ frames_processed_(0),
+ captured_frames_(0),
+ dropped_frames_(0),
+ dropped_frames_before_first_encode_(0),
+ dropped_frames_before_rendering_(0),
+ last_render_time_(0),
+ last_render_delta_ms_(0),
+ last_unfreeze_time_ms_(0),
+ rtp_timestamp_delta_(0),
+ cpu_time_(0),
+ wallclock_time_(0),
+ avg_psnr_threshold_(avg_psnr_threshold),
+ avg_ssim_threshold_(avg_ssim_threshold),
+ is_quick_test_enabled_(is_quick_test_enabled),
+ quit_(false),
+ done_(true, false),
+ vp8_depacketizer_(CreateVideoRtpDepacketizer(kVideoCodecVP8)),
+ vp9_depacketizer_(CreateVideoRtpDepacketizer(kVideoCodecVP9)),
+ clock_(clock),
+ start_ms_(clock->TimeInMilliseconds()),
+ task_queue_(task_queue) {
+ // Create thread pool for CPU-expensive PSNR/SSIM calculations.
+
+  // Try to use about as many threads as cores, but leave kMinCoresLeft cores
+  // free, so that we don't accidentally starve "real" worker threads (codec
+  // etc).
+ // Also, don't allocate more than kMaxComparisonThreads, even if there are
+ // spare cores.
+
+ uint32_t num_cores = CpuInfo::DetectNumberOfCores();
+ RTC_DCHECK_GE(num_cores, 1);
+ static const uint32_t kMinCoresLeft = 4;
+ static const uint32_t kMaxComparisonThreads = 8;
+
+ if (num_cores <= kMinCoresLeft) {
+ num_cores = 1;
+ } else {
+ num_cores -= kMinCoresLeft;
+ num_cores = std::min(num_cores, kMaxComparisonThreads);
+ }
+
+ for (uint32_t i = 0; i < num_cores; ++i) {
+ comparison_thread_pool_.push_back(rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (CompareFrames()) {
+ }
+ },
+ "Analyzer"));
+ }
+
+ if (!rtp_dump_name.empty()) {
+ fprintf(stdout, "Writing rtp dump to %s\n", rtp_dump_name.c_str());
+ rtp_file_writer_.reset(test::RtpFileWriter::Create(
+ test::RtpFileWriter::kRtpDump, rtp_dump_name));
+ }
+}
+
+VideoAnalyzer::~VideoAnalyzer() {
+ {
+ MutexLock lock(&comparison_lock_);
+ quit_ = true;
+ }
+ // Joins all threads.
+ comparison_thread_pool_.clear();
+}
+
+void VideoAnalyzer::SetReceiver(PacketReceiver* receiver) {
+ receiver_ = receiver;
+}
+
+void VideoAnalyzer::SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* video_source,
+ bool respect_sink_wants) {
+ if (respect_sink_wants)
+ captured_frame_forwarder_.SetSource(video_source);
+ rtc::VideoSinkWants wants;
+ video_source->AddOrUpdateSink(InputInterface(), wants);
+}
+
+void VideoAnalyzer::SetCall(Call* call) {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(!call_);
+ call_ = call;
+}
+
+void VideoAnalyzer::SetSendStream(VideoSendStream* stream) {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(!send_stream_);
+ send_stream_ = stream;
+}
+
+void VideoAnalyzer::SetReceiveStream(VideoReceiveStreamInterface* stream) {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(!receive_stream_);
+ receive_stream_ = stream;
+}
+
+void VideoAnalyzer::SetAudioReceiveStream(
+ AudioReceiveStreamInterface* recv_stream) {
+ MutexLock lock(&lock_);
+ RTC_CHECK(!audio_receive_stream_);
+ audio_receive_stream_ = recv_stream;
+}
+
+rtc::VideoSinkInterface<VideoFrame>* VideoAnalyzer::InputInterface() {
+ return &captured_frame_forwarder_;
+}
+
+rtc::VideoSourceInterface<VideoFrame>* VideoAnalyzer::OutputInterface() {
+ return &captured_frame_forwarder_;
+}
+
+void VideoAnalyzer::DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) {
+ return receiver_->DeliverRtcpPacket(std::move(packet));
+}
+
+void VideoAnalyzer::DeliverRtpPacket(
+ MediaType media_type,
+ RtpPacketReceived packet,
+ PacketReceiver::OnUndemuxablePacketHandler undemuxable_packet_handler) {
+ if (rtp_file_writer_) {
+ test::RtpPacket p;
+ memcpy(p.data, packet.Buffer().data(), packet.size());
+ p.length = packet.size();
+ p.original_length = packet.size();
+ p.time_ms = clock_->TimeInMilliseconds() - start_ms_;
+ rtp_file_writer_->WritePacket(&p);
+ }
+
+ if (!IsFlexfec(packet.PayloadType()) &&
+ (packet.Ssrc() == ssrc_to_analyze_ ||
+ packet.Ssrc() == rtx_ssrc_to_analyze_)) {
+ // Ignore FlexFEC timestamps, to avoid collisions with media timestamps.
+ // (FlexFEC and media are sent on different SSRCs, which have different
+    // timestamp spaces.)
+ // Also ignore packets from wrong SSRC, but include retransmits.
+ MutexLock lock(&lock_);
+ int64_t timestamp =
+ wrap_handler_.Unwrap(packet.Timestamp() - rtp_timestamp_delta_);
+ recv_times_[timestamp] = clock_->CurrentNtpInMilliseconds();
+ }
+
+ return receiver_->DeliverRtpPacket(media_type, std::move(packet),
+ std::move(undemuxable_packet_handler));
+}
+
+void VideoAnalyzer::PreEncodeOnFrame(const VideoFrame& video_frame) {
+ MutexLock lock(&lock_);
+ if (!first_encoded_timestamp_) {
+ while (frames_.front().timestamp() != video_frame.timestamp()) {
+ ++dropped_frames_before_first_encode_;
+ frames_.pop_front();
+ RTC_CHECK(!frames_.empty());
+ }
+ first_encoded_timestamp_ = video_frame.timestamp();
+ }
+}
+
+void VideoAnalyzer::PostEncodeOnFrame(size_t stream_id, uint32_t timestamp) {
+ MutexLock lock(&lock_);
+ if (!first_sent_timestamp_ && stream_id == selected_stream_) {
+ first_sent_timestamp_ = timestamp;
+ }
+}
+
+bool VideoAnalyzer::SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet, length);
+
+ int64_t current_time = clock_->CurrentNtpInMilliseconds();
+
+ bool result = transport_->SendRtp(packet, length, options);
+ {
+ MutexLock lock(&lock_);
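+    // The first packet on the analyzed SSRC fixes `rtp_timestamp_delta_`,
+    // the offset between on-the-wire RTP timestamps and captured-frame
+    // timestamps; it is used below and in OnFrame() to map packets and
+    // rendered frames back to captured frames.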
+ if (rtp_timestamp_delta_ == 0 && rtp_packet.Ssrc() == ssrc_to_analyze_) {
+ RTC_CHECK(static_cast<bool>(first_sent_timestamp_));
+ rtp_timestamp_delta_ = rtp_packet.Timestamp() - *first_sent_timestamp_;
+ }
+
+ if (!IsFlexfec(rtp_packet.PayloadType()) &&
+ rtp_packet.Ssrc() == ssrc_to_analyze_) {
+ // Ignore FlexFEC timestamps, to avoid collisions with media timestamps.
+ // (FlexFEC and media are sent on different SSRCs, which have different
+      // timestamp spaces.)
+ // Also ignore packets from wrong SSRC and retransmits.
+ int64_t timestamp =
+ wrap_handler_.Unwrap(rtp_packet.Timestamp() - rtp_timestamp_delta_);
+ send_times_[timestamp] = current_time;
+
+ if (IsInSelectedSpatialAndTemporalLayer(rtp_packet)) {
+ encoded_frame_sizes_[timestamp] += rtp_packet.payload_size();
+ }
+ }
+ }
+ return result;
+}
+
+bool VideoAnalyzer::SendRtcp(const uint8_t* packet, size_t length) {
+ return transport_->SendRtcp(packet, length);
+}
+
+void VideoAnalyzer::OnFrame(const VideoFrame& video_frame) {
+ int64_t render_time_ms = clock_->CurrentNtpInMilliseconds();
+
+ MutexLock lock(&lock_);
+
+ StartExcludingCpuThreadTime();
+
+ int64_t send_timestamp =
+ wrap_handler_.Unwrap(video_frame.timestamp() - rtp_timestamp_delta_);
+
+ while (wrap_handler_.Unwrap(frames_.front().timestamp()) < send_timestamp) {
+ if (!last_rendered_frame_) {
+ // No previous frame rendered, this one was dropped after sending but
+ // before rendering.
+ ++dropped_frames_before_rendering_;
+ } else {
+ AddFrameComparison(frames_.front(), *last_rendered_frame_, true,
+ render_time_ms);
+ }
+ frames_.pop_front();
+ RTC_DCHECK(!frames_.empty());
+ }
+
+ VideoFrame reference_frame = frames_.front();
+ frames_.pop_front();
+ int64_t reference_timestamp =
+ wrap_handler_.Unwrap(reference_frame.timestamp());
+ if (send_timestamp == reference_timestamp - 1) {
+ // TODO(ivica): Make this work for > 2 streams.
+ // Look at RTPSender::BuildRTPHeader.
+ ++send_timestamp;
+ }
+ ASSERT_EQ(reference_timestamp, send_timestamp);
+
+ AddFrameComparison(reference_frame, video_frame, false, render_time_ms);
+
+ last_rendered_frame_ = video_frame;
+
+ StopExcludingCpuThreadTime();
+}
+
+void VideoAnalyzer::Wait() {
+  // Frame comparisons can be very expensive. Wait for the test to be done,
+  // but on each timeout check whether frames_processed is still increasing.
+  // If so, give it more time; otherwise fail. Hopefully this reduces test
+  // flakiness.
+
+ RepeatingTaskHandle stats_polling_task = RepeatingTaskHandle::DelayedStart(
+ task_queue_, kSendStatsPollingInterval, [this] {
+ PollStats();
+ return kSendStatsPollingInterval;
+ });
+
+ int last_frames_processed = -1;
+ int last_frames_captured = -1;
+ int iteration = 0;
+
+ while (!done_.Wait(kProbingInterval)) {
+ int frames_processed;
+ int frames_captured;
+ {
+ MutexLock lock(&comparison_lock_);
+ frames_processed = frames_processed_;
+ frames_captured = captured_frames_;
+ }
+
+ // Print some output so test infrastructure won't think we've crashed.
+ const char* kKeepAliveMessages[3] = {
+ "Uh, I'm-I'm not quite dead, sir.",
+ "Uh, I-I think uh, I could pull through, sir.",
+ "Actually, I think I'm all right to come with you--"};
+ if (++iteration % kKeepAliveIntervalIterations == 0) {
+ printf("- %s\n", kKeepAliveMessages[iteration % 3]);
+ }
+
+ if (last_frames_processed == -1) {
+ last_frames_processed = frames_processed;
+ last_frames_captured = frames_captured;
+ continue;
+ }
+ if (frames_processed == last_frames_processed &&
+ last_frames_captured == frames_captured &&
+ clock_->CurrentTime() > test_end_) {
+ done_.Set();
+ break;
+ }
+ last_frames_processed = frames_processed;
+ last_frames_captured = frames_captured;
+ }
+
+ if (iteration > 0)
+ printf("- Farewell, sweet Concorde!\n");
+
+ SendTask(task_queue_, [&] { stats_polling_task.Stop(); });
+
+ PrintResults();
+ if (graph_data_output_file_)
+ PrintSamplesToFile();
+}
+
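+// CPU time accounting trick: the Start* methods subtract the current reading
+// and the Stop* methods add it back, so `cpu_time_` and `wallclock_time_`
+// accumulate sums of (stop - start) intervals. The Exclude* pair inverts the
+// signs to subtract comparison-thread time from the measured CPU usage.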
+void VideoAnalyzer::StartMeasuringCpuProcessTime() {
+ MutexLock lock(&cpu_measurement_lock_);
+ cpu_time_ -= rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ -= rtc::SystemTimeNanos();
+}
+
+void VideoAnalyzer::StopMeasuringCpuProcessTime() {
+ MutexLock lock(&cpu_measurement_lock_);
+ cpu_time_ += rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ += rtc::SystemTimeNanos();
+}
+
+void VideoAnalyzer::StartExcludingCpuThreadTime() {
+ MutexLock lock(&cpu_measurement_lock_);
+ cpu_time_ += rtc::GetThreadCpuTimeNanos();
+}
+
+void VideoAnalyzer::StopExcludingCpuThreadTime() {
+ MutexLock lock(&cpu_measurement_lock_);
+ cpu_time_ -= rtc::GetThreadCpuTimeNanos();
+}
+
+double VideoAnalyzer::GetCpuUsagePercent() {
+ MutexLock lock(&cpu_measurement_lock_);
+ return static_cast<double>(cpu_time_) / wallclock_time_ * 100.0;
+}
+
+bool VideoAnalyzer::IsInSelectedSpatialAndTemporalLayer(
+ const RtpPacket& rtp_packet) {
+ if (rtp_packet.PayloadType() == test::CallTest::kPayloadTypeVP8) {
+ auto parsed_payload = vp8_depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ RTC_DCHECK(parsed_payload);
+ const auto& vp8_header = absl::get<RTPVideoHeaderVP8>(
+ parsed_payload->video_header.video_type_header);
+ int temporal_idx = vp8_header.temporalIdx;
+ return selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
+ temporal_idx <= selected_tl_;
+ }
+
+ if (rtp_packet.PayloadType() == test::CallTest::kPayloadTypeVP9) {
+ auto parsed_payload = vp9_depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ RTC_DCHECK(parsed_payload);
+ const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
+ parsed_payload->video_header.video_type_header);
+ int temporal_idx = vp9_header.temporal_idx;
+ int spatial_idx = vp9_header.spatial_idx;
+ return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
+ temporal_idx <= selected_tl_) &&
+ (selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
+ spatial_idx <= selected_sl_);
+ }
+
+ return true;
+}
+
+void VideoAnalyzer::PollStats() {
+  // Do not grab `comparison_lock_` before `GetStats()` completes.
+ // Otherwise a deadlock may occur:
+ // 1) `comparison_lock_` is acquired after `lock_`
+ // 2) `lock_` is acquired after internal pacer lock in SendRtp()
+ // 3) internal pacer lock is acquired by GetStats().
+ Call::Stats call_stats = call_->GetStats();
+
+ MutexLock lock(&comparison_lock_);
+
+ send_bandwidth_bps_.AddSample(call_stats.send_bandwidth_bps);
+
+ VideoSendStream::Stats send_stats = send_stream_->GetStats();
+  // It's not certain that we have estimates for any of these stats yet.
+  // Check that they are positive before mixing them in.
+ if (send_stats.encode_frame_rate > 0)
+ encode_frame_rate_.AddSample(send_stats.encode_frame_rate);
+ if (send_stats.avg_encode_time_ms > 0)
+ encode_time_ms_.AddSample(send_stats.avg_encode_time_ms);
+ if (send_stats.encode_usage_percent > 0)
+ encode_usage_percent_.AddSample(send_stats.encode_usage_percent);
+ if (send_stats.media_bitrate_bps > 0)
+ media_bitrate_bps_.AddSample(send_stats.media_bitrate_bps);
+ size_t fec_bytes = 0;
+ for (const auto& kv : send_stats.substreams) {
+ fec_bytes += kv.second.rtp_stats.fec.payload_bytes +
+ kv.second.rtp_stats.fec.padding_bytes;
+ }
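+  // The FEC byte delta since the previous poll, times 8, is the number of
+  // FEC bits sent during the last polling interval (equal to bits per second
+  // when kSendStatsPollingInterval is one second).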
+ fec_bitrate_bps_.AddSample((fec_bytes - last_fec_bytes_) * 8);
+ last_fec_bytes_ = fec_bytes;
+
+ if (receive_stream_ != nullptr) {
+ VideoReceiveStreamInterface::Stats receive_stats =
+ receive_stream_->GetStats();
+
+ // Freeze metrics.
+ freeze_count_ = receive_stats.freeze_count;
+ total_freezes_duration_ms_ = receive_stats.total_freezes_duration_ms;
+ total_inter_frame_delay_ = receive_stats.total_inter_frame_delay;
+ total_squared_inter_frame_delay_ =
+ receive_stats.total_squared_inter_frame_delay;
+
+    // `total_decode_time_ms` gives a good estimate of the mean decode time,
+    // while `decode_ms` is used to track the standard deviation.
+ if (receive_stats.frames_decoded > 0)
+ mean_decode_time_ms_ = receive_stats.total_decode_time.ms<double>() /
+ receive_stats.frames_decoded;
+ if (receive_stats.decode_ms > 0)
+ decode_time_ms_.AddSample(receive_stats.decode_ms);
+ if (receive_stats.max_decode_ms > 0)
+ decode_time_max_ms_.AddSample(receive_stats.max_decode_ms);
+ if (receive_stats.width > 0 && receive_stats.height > 0) {
+ pixels_.AddSample(receive_stats.width * receive_stats.height);
+ }
+
+ // `frames_decoded` and `frames_rendered` are used because they are more
+ // accurate than `decode_frame_rate` and `render_frame_rate`.
+ // The latter two are calculated on a momentary basis.
+ if (total_inter_frame_delay_ > 0) {
+ decode_frame_rate_ =
+ receive_stats.frames_decoded / total_inter_frame_delay_;
+ render_frame_rate_ =
+ receive_stats.frames_rendered / total_inter_frame_delay_;
+ }
+ }
+
+ if (audio_receive_stream_ != nullptr) {
+ AudioReceiveStreamInterface::Stats receive_stats =
+ audio_receive_stream_->GetStats(/*get_and_clear_legacy_stats=*/true);
+ audio_expand_rate_.AddSample(receive_stats.expand_rate);
+ audio_accelerate_rate_.AddSample(receive_stats.accelerate_rate);
+ audio_jitter_buffer_ms_.AddSample(receive_stats.jitter_buffer_ms);
+ }
+
+ memory_usage_.AddSample(rtc::GetProcessResidentSizeBytes());
+}
+
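+// Body of the comparison worker-thread loop: returns true while more
+// comparisons may still arrive, false once every frame has been recorded,
+// waking any sibling threads that are still waiting.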
+bool VideoAnalyzer::CompareFrames() {
+ if (AllFramesRecorded())
+ return false;
+
+ FrameComparison comparison;
+
+ if (!PopComparison(&comparison)) {
+ // Wait until new comparison task is available, or test is done.
+ // If done, wake up remaining threads waiting.
+ comparison_available_event_.Wait(TimeDelta::Seconds(1));
+ if (AllFramesRecorded()) {
+ comparison_available_event_.Set();
+ return false;
+ }
+ return true; // Try again.
+ }
+
+ StartExcludingCpuThreadTime();
+
+ PerformFrameComparison(comparison);
+
+ StopExcludingCpuThreadTime();
+
+ if (FrameProcessed()) {
+ done_.Set();
+ comparison_available_event_.Set();
+ return false;
+ }
+
+ return true;
+}
+
+bool VideoAnalyzer::PopComparison(VideoAnalyzer::FrameComparison* comparison) {
+ MutexLock lock(&comparison_lock_);
+  // If AllFramesRecorded() is true, we have already popped
+  // frames_to_process_ frames from comparisons_, so there is no more work
+  // for this thread to do. frames_processed_ might still be lower if not
+  // all comparisons are done, but those frames are currently being
+  // worked on by other threads.
+ if (comparisons_.empty() || AllFramesRecordedLocked())
+ return false;
+
+ *comparison = comparisons_.front();
+ comparisons_.pop_front();
+
+ FrameRecorded();
+ return true;
+}
+
+void VideoAnalyzer::FrameRecorded() {
+ ++frames_recorded_;
+}
+
+bool VideoAnalyzer::AllFramesRecorded() {
+ MutexLock lock(&comparison_lock_);
+ return AllFramesRecordedLocked();
+}
+
+bool VideoAnalyzer::AllFramesRecordedLocked() {
+ RTC_DCHECK(frames_recorded_ <= frames_to_process_);
+ return frames_recorded_ == frames_to_process_ ||
+ (clock_->CurrentTime() > test_end_ && comparisons_.empty()) || quit_;
+}
+
+bool VideoAnalyzer::FrameProcessed() {
+ MutexLock lock(&comparison_lock_);
+ ++frames_processed_;
+ RTC_DCHECK_LE(frames_processed_, frames_to_process_);
+ return frames_processed_ == frames_to_process_ ||
+ (clock_->CurrentTime() > test_end_ && comparisons_.empty());
+}
+
+void VideoAnalyzer::PrintResults() {
+ StopMeasuringCpuProcessTime();
+ int dropped_frames_diff;
+ {
+ MutexLock lock(&lock_);
+ dropped_frames_diff = dropped_frames_before_first_encode_ +
+ dropped_frames_before_rendering_ + frames_.size();
+ }
+ MutexLock lock(&comparison_lock_);
+ PrintResult("psnr_dB", psnr_, Unit::kUnitless,
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResult("ssim", ssim_, Unit::kUnitless,
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResult("sender_time", sender_time_, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ PrintResult("receiver_time", receiver_time_, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ PrintResult("network_time", network_time_, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ PrintResult("total_delay_incl_network", end_to_end_, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ PrintResult("time_between_rendered_frames", rendered_delta_,
+ Unit::kMilliseconds, ImprovementDirection::kSmallerIsBetter);
+ PrintResult("encode_frame_rate_fps", encode_frame_rate_, Unit::kHertz,
+ ImprovementDirection::kBiggerIsBetter);
+ PrintResult("encode_time", encode_time_ms_, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ PrintResult("media_bitrate", media_bitrate_bps_ / 1000.0,
+ Unit::kKilobitsPerSecond, ImprovementDirection::kNeitherIsBetter);
+ PrintResult("fec_bitrate", fec_bitrate_bps_ / 1000.0,
+ Unit::kKilobitsPerSecond, ImprovementDirection::kNeitherIsBetter);
+ PrintResult("send_bandwidth", send_bandwidth_bps_ / 1000.0,
+ Unit::kKilobitsPerSecond, ImprovementDirection::kNeitherIsBetter);
+ PrintResult("pixels_per_frame", pixels_, Unit::kCount,
+ ImprovementDirection::kBiggerIsBetter);
+
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "decode_frame_rate_fps", test_label_, decode_frame_rate_, Unit::kHertz,
+ ImprovementDirection::kBiggerIsBetter);
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "render_frame_rate_fps", test_label_, render_frame_rate_, Unit::kHertz,
+ ImprovementDirection::kBiggerIsBetter);
+
+ // Record the time from the last freeze until the last rendered frame to
+ // ensure we cover the full timespan of the session. Otherwise the metric
+ // would penalize an early freeze followed by no freezes until the end.
+ time_between_freezes_.AddSample(last_render_time_ - last_unfreeze_time_ms_);
+
+ // Freeze metrics.
+ PrintResult("time_between_freezes", time_between_freezes_,
+ Unit::kMilliseconds, ImprovementDirection::kBiggerIsBetter);
+
+ const double freeze_count_double = static_cast<double>(freeze_count_);
+ const double total_freezes_duration_ms_double =
+ static_cast<double>(total_freezes_duration_ms_);
+ const double total_frames_duration_ms_double =
+ total_inter_frame_delay_ * rtc::kNumMillisecsPerSec;
+
+ if (total_frames_duration_ms_double > 0) {
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "freeze_duration_ratio", test_label_,
+ total_freezes_duration_ms_double / total_frames_duration_ms_double,
+ Unit::kUnitless, ImprovementDirection::kSmallerIsBetter);
+ RTC_DCHECK_LE(total_freezes_duration_ms_double,
+ total_frames_duration_ms_double);
+
+ constexpr double ms_per_minute = 60 * 1000;
+ const double total_frames_duration_min =
+ total_frames_duration_ms_double / ms_per_minute;
+ if (total_frames_duration_min > 0) {
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "freeze_count_per_minute", test_label_,
+ freeze_count_double / total_frames_duration_min, Unit::kUnitless,
+ ImprovementDirection::kSmallerIsBetter);
+ }
+ }
+
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "freeze_duration_average", test_label_,
+ freeze_count_double > 0
+ ? total_freezes_duration_ms_double / freeze_count_double
+ : 0,
+ Unit::kMilliseconds, ImprovementDirection::kSmallerIsBetter);
+
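+  // Harmonic frame rate: the total stream duration divided by the sum of
+  // squared inter-frame delays (in seconds). This is the duration-weighted
+  // harmonic mean of the instantaneous frame rates, so long pauses are
+  // penalized quadratically.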
+ if (total_squared_inter_frame_delay_ > 0) {
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "harmonic_frame_rate_fps", test_label_,
+ total_frames_duration_ms_double /
+ (1000 * total_squared_inter_frame_delay_),
+ Unit::kHertz, ImprovementDirection::kBiggerIsBetter);
+ }
+
+ if (worst_frame_) {
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "min_psnr_dB", test_label_, worst_frame_->psnr, Unit::kUnitless,
+ ImprovementDirection::kBiggerIsBetter);
+ }
+
+ if (receive_stream_ != nullptr) {
+ PrintResultWithExternalMean("decode_time", mean_decode_time_ms_,
+ decode_time_ms_, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ }
+ dropped_frames_ += dropped_frames_diff;
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "dropped_frames", test_label_, dropped_frames_, Unit::kCount,
+ ImprovementDirection::kSmallerIsBetter);
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "cpu_usage_%", test_label_, GetCpuUsagePercent(), Unit::kUnitless,
+ ImprovementDirection::kSmallerIsBetter);
+
+#if defined(WEBRTC_WIN)
+  // On Linux and Mac the resident set may include unused pages, so this
+  // metric would depend on the order in which tests are run and would be
+  // flaky.
+ PrintResult("memory_usage", memory_usage_, Unit::kBytes,
+ ImprovementDirection::kSmallerIsBetter);
+#endif
+
+  // Save only the worst frame for manual analysis. The intention is to
+  // detect video corruption, not to track picture quality, so JPEG is
+  // sufficient here.
+ if (absl::GetFlag(FLAGS_save_worst_frame) && worst_frame_) {
+ std::string output_dir;
+ test::GetTestArtifactsDir(&output_dir);
+ std::string output_path =
+ test::JoinFilename(output_dir, test_label_ + ".jpg");
+ RTC_LOG(LS_INFO) << "Saving worst frame to " << output_path;
+ test::JpegFrameWriter frame_writer(output_path);
+ RTC_CHECK(
+ frame_writer.WriteFrame(worst_frame_->frame, 100 /*best quality*/));
+ }
+
+ if (audio_receive_stream_ != nullptr) {
+ PrintResult("audio_expand_rate", audio_expand_rate_, Unit::kUnitless,
+ ImprovementDirection::kSmallerIsBetter);
+ PrintResult("audio_accelerate_rate", audio_accelerate_rate_,
+ Unit::kUnitless, ImprovementDirection::kSmallerIsBetter);
+ PrintResult("audio_jitter_buffer", audio_jitter_buffer_ms_,
+ Unit::kMilliseconds, ImprovementDirection::kNeitherIsBetter);
+ }
+
+  // Disable quality checks for quick tests, as they may fail when too few
+  // samples were collected.
+ if (!is_quick_test_enabled_) {
+ EXPECT_GT(psnr_.GetAverage(), avg_psnr_threshold_);
+ EXPECT_GT(ssim_.GetAverage(), avg_ssim_threshold_);
+ }
+}
+
+void VideoAnalyzer::PerformFrameComparison(
+ const VideoAnalyzer::FrameComparison& comparison) {
+  // Perform expensive PSNR and SSIM calculations while not holding the lock.
+ double psnr = -1.0;
+ double ssim = -1.0;
+ if (comparison.reference && !comparison.dropped) {
+ psnr = I420PSNR(&*comparison.reference, &*comparison.render);
+ ssim = I420SSIM(&*comparison.reference, &*comparison.render);
+ }
+
+ MutexLock lock(&comparison_lock_);
+
+ if (psnr >= 0.0 && (!worst_frame_ || worst_frame_->psnr > psnr)) {
+ worst_frame_.emplace(FrameWithPsnr{psnr, *comparison.render});
+ }
+
+ if (graph_data_output_file_) {
+ samples_.push_back(Sample(comparison.dropped, comparison.input_time_ms,
+ comparison.send_time_ms, comparison.recv_time_ms,
+ comparison.render_time_ms,
+ comparison.encoded_frame_size, psnr, ssim));
+ }
+ if (psnr >= 0.0)
+ psnr_.AddSample(psnr);
+ if (ssim >= 0.0)
+ ssim_.AddSample(ssim);
+
+ if (comparison.dropped) {
+ ++dropped_frames_;
+ return;
+ }
+ if (last_unfreeze_time_ms_ == 0)
+ last_unfreeze_time_ms_ = comparison.render_time_ms;
+ if (last_render_time_ != 0) {
+ const int64_t render_delta_ms =
+ comparison.render_time_ms - last_render_time_;
+ rendered_delta_.AddSample(render_delta_ms);
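+    // Treat a jump of more than 150 ms in the inter-frame delay, relative to
+    // the previous delta, as a freeze (matching kMinIncreaseForFreezeMs used
+    // by VideoQualityObserver).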
+ if (last_render_delta_ms_ != 0 &&
+ render_delta_ms - last_render_delta_ms_ > 150) {
+ time_between_freezes_.AddSample(last_render_time_ -
+ last_unfreeze_time_ms_);
+ last_unfreeze_time_ms_ = comparison.render_time_ms;
+ }
+ last_render_delta_ms_ = render_delta_ms;
+ }
+ last_render_time_ = comparison.render_time_ms;
+
+ sender_time_.AddSample(comparison.send_time_ms - comparison.input_time_ms);
+ if (comparison.recv_time_ms > 0) {
+    // If recv_time_ms == 0, this frame consisted of packets which were all
+    // lost in the transport. Since we were nevertheless able to render the
+    // frame, the dropped packets must have been recovered by FlexFEC. The
+    // FlexFEC recovery happens internally in Call, so we cannot know here
+    // which FEC packets protected the lost media packets. Consequently, we
+    // were not able to record a meaningful recv_time_ms and skip this
+    // sample.
+    //
+    // The reasoning above does not hold for ULPFEC and RTX, as for those
+    // strategies the timestamp of the received packets is set to the
+    // timestamp of the protected/retransmitted media packet. I.e., then
+    // recv_time_ms != 0, even though the media packets were lost.
+ receiver_time_.AddSample(comparison.render_time_ms -
+ comparison.recv_time_ms);
+ network_time_.AddSample(comparison.recv_time_ms - comparison.send_time_ms);
+ }
+ end_to_end_.AddSample(comparison.render_time_ms - comparison.input_time_ms);
+ encoded_frame_size_.AddSample(comparison.encoded_frame_size);
+}
+
+void VideoAnalyzer::PrintResult(absl::string_view result_type,
+ const SamplesStatsCounter& stats,
+ Unit unit,
+ ImprovementDirection improvement_direction) {
+ GetGlobalMetricsLogger()->LogMetric(result_type, test_label_, stats, unit,
+ improvement_direction);
+}
+
+void VideoAnalyzer::PrintResultWithExternalMean(
+ absl::string_view result_type,
+ double mean,
+ const SamplesStatsCounter& stats,
+ Unit unit,
+ ImprovementDirection improvement_direction) {
+ // If the true mean is different than the sample mean, the sample variance is
+ // too low. The sample variance given a known mean is obtained by adding the
+ // squared error between the true mean and the sample mean.
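+  // Concretely, for the empirical distribution:
+  //   E[(X - mu)^2] = Var_sample + (mean_sample - mu)^2,
+  // so the compensated variance below is the mean squared deviation about
+  // the externally supplied mean.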
+ double compensated_variance =
+ stats.IsEmpty()
+ ? 0.0
+ : stats.GetVariance() + pow(mean - stats.GetAverage(), 2.0);
+ GetGlobalMetricsLogger()->LogMetric(
+ result_type, test_label_,
+ Metric::Stats{.mean = mean, .stddev = std::sqrt(compensated_variance)},
+ unit, improvement_direction);
+}
+
+void VideoAnalyzer::PrintSamplesToFile() {
+ FILE* out = graph_data_output_file_;
+ MutexLock lock(&comparison_lock_);
+ absl::c_sort(samples_, [](const Sample& A, const Sample& B) -> bool {
+ return A.input_time_ms < B.input_time_ms;
+ });
+
+ fprintf(out, "%s\n", graph_title_.c_str());
+ fprintf(out, "%zu\n", samples_.size());
+ fprintf(out,
+ "dropped "
+ "input_time_ms "
+ "send_time_ms "
+ "recv_time_ms "
+ "render_time_ms "
+ "encoded_frame_size "
+ "psnr "
+ "ssim "
+ "encode_time_ms\n");
+ for (const Sample& sample : samples_) {
+ fprintf(out,
+ "%d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %zu %lf %lf\n",
+ sample.dropped, sample.input_time_ms, sample.send_time_ms,
+ sample.recv_time_ms, sample.render_time_ms,
+ sample.encoded_frame_size, sample.psnr, sample.ssim);
+ }
+}
+
+void VideoAnalyzer::AddCapturedFrameForComparison(
+ const VideoFrame& video_frame) {
+ bool must_capture = false;
+ {
+ MutexLock lock(&comparison_lock_);
+ must_capture = captured_frames_ < frames_to_process_;
+ if (must_capture) {
+ ++captured_frames_;
+ }
+ }
+ if (must_capture) {
+ MutexLock lock(&lock_);
+ frames_.push_back(video_frame);
+ }
+}
+
+void VideoAnalyzer::AddFrameComparison(const VideoFrame& reference,
+ const VideoFrame& render,
+ bool dropped,
+ int64_t render_time_ms) {
+ int64_t reference_timestamp = wrap_handler_.Unwrap(reference.timestamp());
+ int64_t send_time_ms = send_times_[reference_timestamp];
+ send_times_.erase(reference_timestamp);
+ int64_t recv_time_ms = recv_times_[reference_timestamp];
+ recv_times_.erase(reference_timestamp);
+
+ // TODO(ivica): Make this work for > 2 streams.
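+  // With two simulcast streams, the second stream's RTP timestamps appear
+  // offset by one, so fall back to `reference_timestamp - 1` (matching the
+  // adjustment made in OnFrame).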
+ auto it = encoded_frame_sizes_.find(reference_timestamp);
+ if (it == encoded_frame_sizes_.end())
+ it = encoded_frame_sizes_.find(reference_timestamp - 1);
+ size_t encoded_size = it == encoded_frame_sizes_.end() ? 0 : it->second;
+ if (it != encoded_frame_sizes_.end())
+ encoded_frame_sizes_.erase(it);
+
+ MutexLock lock(&comparison_lock_);
+ if (comparisons_.size() < kMaxComparisons) {
+ comparisons_.push_back(FrameComparison(
+ reference, render, dropped, reference.ntp_time_ms(), send_time_ms,
+ recv_time_ms, render_time_ms, encoded_size));
+ } else {
+ comparisons_.push_back(FrameComparison(dropped, reference.ntp_time_ms(),
+ send_time_ms, recv_time_ms,
+ render_time_ms, encoded_size));
+ }
+ comparison_available_event_.Set();
+}
+
+VideoAnalyzer::FrameComparison::FrameComparison()
+ : dropped(false),
+ input_time_ms(0),
+ send_time_ms(0),
+ recv_time_ms(0),
+ render_time_ms(0),
+ encoded_frame_size(0) {}
+
+VideoAnalyzer::FrameComparison::FrameComparison(const VideoFrame& reference,
+ const VideoFrame& render,
+ bool dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size)
+ : reference(reference),
+ render(render),
+ dropped(dropped),
+ input_time_ms(input_time_ms),
+ send_time_ms(send_time_ms),
+ recv_time_ms(recv_time_ms),
+ render_time_ms(render_time_ms),
+ encoded_frame_size(encoded_frame_size) {}
+
+VideoAnalyzer::FrameComparison::FrameComparison(bool dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size)
+ : dropped(dropped),
+ input_time_ms(input_time_ms),
+ send_time_ms(send_time_ms),
+ recv_time_ms(recv_time_ms),
+ render_time_ms(render_time_ms),
+ encoded_frame_size(encoded_frame_size) {}
+
+VideoAnalyzer::Sample::Sample(int dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size,
+ double psnr,
+ double ssim)
+ : dropped(dropped),
+ input_time_ms(input_time_ms),
+ send_time_ms(send_time_ms),
+ recv_time_ms(recv_time_ms),
+ render_time_ms(render_time_ms),
+ encoded_frame_size(encoded_frame_size),
+ psnr(psnr),
+ ssim(ssim) {}
+
+VideoAnalyzer::CapturedFrameForwarder::CapturedFrameForwarder(
+ VideoAnalyzer* analyzer,
+ Clock* clock,
+ int frames_to_capture,
+ TimeDelta test_duration)
+ : analyzer_(analyzer),
+ send_stream_input_(nullptr),
+ video_source_(nullptr),
+ clock_(clock),
+ captured_frames_(0),
+ frames_to_capture_(frames_to_capture),
+ test_end_(clock->CurrentTime() + test_duration) {}
+
+void VideoAnalyzer::CapturedFrameForwarder::SetSource(
+ VideoSourceInterface<VideoFrame>* video_source) {
+ video_source_ = video_source;
+}
+
+void VideoAnalyzer::CapturedFrameForwarder::OnFrame(
+ const VideoFrame& video_frame) {
+ VideoFrame copy = video_frame;
+  // Frames from the capturer do not have an RTP timestamp.
+  // Create one so it can be used for comparison.
+ RTC_DCHECK_EQ(0, video_frame.timestamp());
+ if (video_frame.ntp_time_ms() == 0)
+ copy.set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
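+  // The RTP video clock runs at 90 kHz, i.e. 90 ticks per millisecond.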
+ copy.set_timestamp(copy.ntp_time_ms() * 90);
+ analyzer_->AddCapturedFrameForComparison(copy);
+ MutexLock lock(&lock_);
+ ++captured_frames_;
+ if (send_stream_input_ && clock_->CurrentTime() <= test_end_ &&
+ captured_frames_ <= frames_to_capture_) {
+ send_stream_input_->OnFrame(copy);
+ }
+}
+
+void VideoAnalyzer::CapturedFrameForwarder::AddOrUpdateSink(
+ rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(!send_stream_input_ || send_stream_input_ == sink);
+ send_stream_input_ = sink;
+ }
+ if (video_source_) {
+ video_source_->AddOrUpdateSink(this, wants);
+ }
+}
+
+void VideoAnalyzer::CapturedFrameForwarder::RemoveSink(
+ rtc::VideoSinkInterface<VideoFrame>* sink) {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(sink == send_stream_input_);
+ send_stream_input_ = nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_analyzer.h b/third_party/libwebrtc/video/video_analyzer.h
new file mode 100644
index 0000000000..448af7ebb7
--- /dev/null
+++ b/third_party/libwebrtc/video/video_analyzer.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_VIDEO_ANALYZER_H_
+#define VIDEO_VIDEO_ANALYZER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/numerics/samples_stats_counter.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/metrics/metric.h"
+#include "api/video/video_source_interface.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/event.h"
+#include "rtc_base/numerics/running_statistics.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "test/layer_filtering_transport.h"
+#include "test/rtp_file_writer.h"
+
+namespace webrtc {
+
+class VideoAnalyzer : public PacketReceiver,
+ public Transport,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ VideoAnalyzer(test::LayerFilteringTransport* transport,
+ const std::string& test_label,
+ double avg_psnr_threshold,
+ double avg_ssim_threshold,
+ int duration_frames,
+ TimeDelta test_duration,
+ FILE* graph_data_output_file,
+ const std::string& graph_title,
+ uint32_t ssrc_to_analyze,
+ uint32_t rtx_ssrc_to_analyze,
+ size_t selected_stream,
+ int selected_sl,
+ int selected_tl,
+ bool is_quick_test_enabled,
+ Clock* clock,
+ std::string rtp_dump_name,
+ TaskQueueBase* task_queue);
+ ~VideoAnalyzer();
+
+ virtual void SetReceiver(PacketReceiver* receiver);
+ void SetSource(rtc::VideoSourceInterface<VideoFrame>* video_source,
+ bool respect_sink_wants);
+ void SetCall(Call* call);
+ void SetSendStream(VideoSendStream* stream);
+ void SetReceiveStream(VideoReceiveStreamInterface* stream);
+ void SetAudioReceiveStream(AudioReceiveStreamInterface* recv_stream);
+
+ rtc::VideoSinkInterface<VideoFrame>* InputInterface();
+ rtc::VideoSourceInterface<VideoFrame>* OutputInterface();
+
+ void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
+ void DeliverRtpPacket(MediaType media_type,
+ RtpPacketReceived packet,
+ PacketReceiver::OnUndemuxablePacketHandler
+ undemuxable_packet_handler) override;
+
+ void PreEncodeOnFrame(const VideoFrame& video_frame);
+ void PostEncodeOnFrame(size_t stream_id, uint32_t timestamp);
+
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override;
+
+ bool SendRtcp(const uint8_t* packet, size_t length) override;
+ void OnFrame(const VideoFrame& video_frame) override;
+ void Wait();
+
+ void StartMeasuringCpuProcessTime();
+ void StopMeasuringCpuProcessTime();
+ void StartExcludingCpuThreadTime() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_);
+ void StopExcludingCpuThreadTime() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_);
+ double GetCpuUsagePercent() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_);
+
+ test::LayerFilteringTransport* const transport_;
+ PacketReceiver* receiver_;
+
+ private:
+ struct FrameComparison {
+ FrameComparison();
+ FrameComparison(const VideoFrame& reference,
+ const VideoFrame& render,
+ bool dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size);
+ FrameComparison(bool dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size);
+
+ absl::optional<VideoFrame> reference;
+ absl::optional<VideoFrame> render;
+ bool dropped;
+ int64_t input_time_ms;
+ int64_t send_time_ms;
+ int64_t recv_time_ms;
+ int64_t render_time_ms;
+ size_t encoded_frame_size;
+ };
+
+ struct Sample {
+ Sample(int dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size,
+ double psnr,
+ double ssim);
+
+ int dropped;
+ int64_t input_time_ms;
+ int64_t send_time_ms;
+ int64_t recv_time_ms;
+ int64_t render_time_ms;
+ size_t encoded_frame_size;
+ double psnr;
+ double ssim;
+ };
+
+ // Implements VideoSinkInterface to receive captured frames from a
+ // FrameGeneratorCapturer. Implements VideoSourceInterface to be able to act
+ // as a source to VideoSendStream.
+ // It forwards all input frames to the VideoAnalyzer for later comparison and
+ // forwards the captured frames to the VideoSendStream.
+ class CapturedFrameForwarder : public rtc::VideoSinkInterface<VideoFrame>,
+ public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+ CapturedFrameForwarder(VideoAnalyzer* analyzer,
+ Clock* clock,
+ int frames_to_capture,
+ TimeDelta test_duration);
+ void SetSource(rtc::VideoSourceInterface<VideoFrame>* video_source);
+
+ private:
+ void OnFrame(const VideoFrame& video_frame)
+ RTC_LOCKS_EXCLUDED(lock_) override;
+
+ // Called when `send_stream_.SetSource()` is called.
+ void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants)
+ RTC_LOCKS_EXCLUDED(lock_) override;
+
+ // Called by `send_stream_` when `send_stream_.SetSource()` is called.
+ void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink)
+ RTC_LOCKS_EXCLUDED(lock_) override;
+
+ VideoAnalyzer* const analyzer_;
+ Mutex lock_;
+ rtc::VideoSinkInterface<VideoFrame>* send_stream_input_
+ RTC_GUARDED_BY(lock_);
+ VideoSourceInterface<VideoFrame>* video_source_;
+ Clock* clock_;
+ int captured_frames_ RTC_GUARDED_BY(lock_);
+ const int frames_to_capture_;
+ const Timestamp test_end_;
+ };
+
+ struct FrameWithPsnr {
+ double psnr;
+ VideoFrame frame;
+ };
+
+ bool IsInSelectedSpatialAndTemporalLayer(const RtpPacket& rtp_packet);
+
+ void AddFrameComparison(const VideoFrame& reference,
+ const VideoFrame& render,
+ bool dropped,
+ int64_t render_time_ms)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ void PollStats() RTC_LOCKS_EXCLUDED(comparison_lock_);
+ static void FrameComparisonThread(void* obj);
+ bool CompareFrames();
+ bool PopComparison(FrameComparison* comparison);
+ // Increment counter for number of frames received for comparison.
+ void FrameRecorded() RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_);
+ // Returns true if all frames to be compared have been taken from the queue.
+ bool AllFramesRecorded() RTC_LOCKS_EXCLUDED(comparison_lock_);
+ bool AllFramesRecordedLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_);
+ // Increase count of number of frames processed. Returns true if this was the
+ // last frame to be processed.
+ bool FrameProcessed() RTC_LOCKS_EXCLUDED(comparison_lock_);
+ void PrintResults() RTC_LOCKS_EXCLUDED(lock_, comparison_lock_);
+ void PerformFrameComparison(const FrameComparison& comparison)
+ RTC_LOCKS_EXCLUDED(comparison_lock_);
+ void PrintResult(absl::string_view result_type,
+ const SamplesStatsCounter& stats,
+ webrtc::test::Unit unit,
+ webrtc::test::ImprovementDirection improvement_direction);
+ void PrintResultWithExternalMean(
+ absl::string_view result_type,
+ double mean,
+ const SamplesStatsCounter& stats,
+ webrtc::test::Unit unit,
+ webrtc::test::ImprovementDirection improvement_direction);
+  void PrintSamplesToFile() RTC_LOCKS_EXCLUDED(comparison_lock_);
+ void AddCapturedFrameForComparison(const VideoFrame& video_frame)
+ RTC_LOCKS_EXCLUDED(lock_, comparison_lock_);
+
+ Call* call_;
+ VideoSendStream* send_stream_;
+ VideoReceiveStreamInterface* receive_stream_;
+ AudioReceiveStreamInterface* audio_receive_stream_;
+ CapturedFrameForwarder captured_frame_forwarder_;
+ const std::string test_label_;
+ FILE* const graph_data_output_file_;
+ const std::string graph_title_;
+ const uint32_t ssrc_to_analyze_;
+ const uint32_t rtx_ssrc_to_analyze_;
+ const size_t selected_stream_;
+ const int selected_sl_;
+ const int selected_tl_;
+
+ Mutex comparison_lock_;
+ std::vector<Sample> samples_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter sender_time_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter receiver_time_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter network_time_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter psnr_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter ssim_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter end_to_end_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter rendered_delta_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter encoded_frame_size_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter encode_frame_rate_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter encode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter encode_usage_percent_ RTC_GUARDED_BY(comparison_lock_);
+ double mean_decode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter decode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter decode_time_max_ms_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter media_bitrate_bps_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter fec_bitrate_bps_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter send_bandwidth_bps_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter memory_usage_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter audio_expand_rate_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter audio_accelerate_rate_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter audio_jitter_buffer_ms_ RTC_GUARDED_BY(comparison_lock_);
+ SamplesStatsCounter pixels_ RTC_GUARDED_BY(comparison_lock_);
+ // Rendered frame with worst PSNR is saved for further analysis.
+ absl::optional<FrameWithPsnr> worst_frame_ RTC_GUARDED_BY(comparison_lock_);
+ // Freeze metrics.
+ SamplesStatsCounter time_between_freezes_ RTC_GUARDED_BY(comparison_lock_);
+ uint32_t freeze_count_ RTC_GUARDED_BY(comparison_lock_);
+ uint32_t total_freezes_duration_ms_ RTC_GUARDED_BY(comparison_lock_);
+ double total_inter_frame_delay_ RTC_GUARDED_BY(comparison_lock_);
+ double total_squared_inter_frame_delay_ RTC_GUARDED_BY(comparison_lock_);
+
+ double decode_frame_rate_ RTC_GUARDED_BY(comparison_lock_);
+ double render_frame_rate_ RTC_GUARDED_BY(comparison_lock_);
+
+ size_t last_fec_bytes_;
+
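+  // Lock ordering: `lock_` may be acquired before `comparison_lock_` and
+  // `cpu_measurement_lock_`, never the other way around (see also the
+  // deadlock note in PollStats()).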
+ Mutex lock_ RTC_ACQUIRED_BEFORE(comparison_lock_)
+ RTC_ACQUIRED_BEFORE(cpu_measurement_lock_);
+ const int frames_to_process_;
+ const Timestamp test_end_;
+ int frames_recorded_ RTC_GUARDED_BY(comparison_lock_);
+ int frames_processed_ RTC_GUARDED_BY(comparison_lock_);
+ int captured_frames_ RTC_GUARDED_BY(comparison_lock_);
+ int dropped_frames_ RTC_GUARDED_BY(comparison_lock_);
+ int dropped_frames_before_first_encode_ RTC_GUARDED_BY(lock_);
+ int dropped_frames_before_rendering_ RTC_GUARDED_BY(lock_);
+ int64_t last_render_time_ RTC_GUARDED_BY(comparison_lock_);
+ int64_t last_render_delta_ms_ RTC_GUARDED_BY(comparison_lock_);
+ int64_t last_unfreeze_time_ms_ RTC_GUARDED_BY(comparison_lock_);
+ uint32_t rtp_timestamp_delta_ RTC_GUARDED_BY(lock_);
+
+ Mutex cpu_measurement_lock_;
+ int64_t cpu_time_ RTC_GUARDED_BY(cpu_measurement_lock_);
+ int64_t wallclock_time_ RTC_GUARDED_BY(cpu_measurement_lock_);
+
+ std::deque<VideoFrame> frames_ RTC_GUARDED_BY(lock_);
+ absl::optional<VideoFrame> last_rendered_frame_ RTC_GUARDED_BY(lock_);
+ RtpTimestampUnwrapper wrap_handler_ RTC_GUARDED_BY(lock_);
+ std::map<int64_t, int64_t> send_times_ RTC_GUARDED_BY(lock_);
+ std::map<int64_t, int64_t> recv_times_ RTC_GUARDED_BY(lock_);
+ std::map<int64_t, size_t> encoded_frame_sizes_ RTC_GUARDED_BY(lock_);
+ absl::optional<uint32_t> first_encoded_timestamp_ RTC_GUARDED_BY(lock_);
+ absl::optional<uint32_t> first_sent_timestamp_ RTC_GUARDED_BY(lock_);
+ const double avg_psnr_threshold_;
+ const double avg_ssim_threshold_;
+ bool is_quick_test_enabled_;
+
+ std::vector<rtc::PlatformThread> comparison_thread_pool_;
+ rtc::Event comparison_available_event_;
+ std::deque<FrameComparison> comparisons_ RTC_GUARDED_BY(comparison_lock_);
+ bool quit_ RTC_GUARDED_BY(comparison_lock_);
+ rtc::Event done_;
+
+ std::unique_ptr<VideoRtpDepacketizer> vp8_depacketizer_;
+ std::unique_ptr<VideoRtpDepacketizer> vp9_depacketizer_;
+ std::unique_ptr<test::RtpFileWriter> rtp_file_writer_;
+ Clock* const clock_;
+ const int64_t start_ms_;
+ TaskQueueBase* task_queue_;
+};
+
+} // namespace webrtc
+#endif // VIDEO_VIDEO_ANALYZER_H_
diff --git a/third_party/libwebrtc/video/video_gn/moz.build b/third_party/libwebrtc/video/video_gn/moz.build
new file mode 100644
index 0000000000..a0f2d5b6ef
--- /dev/null
+++ b/third_party/libwebrtc/video/video_gn/moz.build
@@ -0,0 +1,255 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/video/rtp_video_stream_receiver2.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/buffered_frame_decryptor.cc",
+ "/third_party/libwebrtc/video/call_stats2.cc",
+ "/third_party/libwebrtc/video/encoder_rtcp_feedback.cc",
+ "/third_party/libwebrtc/video/quality_limitation_reason_tracker.cc",
+ "/third_party/libwebrtc/video/quality_threshold.cc",
+ "/third_party/libwebrtc/video/receive_statistics_proxy2.cc",
+ "/third_party/libwebrtc/video/report_block_stats.cc",
+ "/third_party/libwebrtc/video/rtp_streams_synchronizer2.cc",
+ "/third_party/libwebrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc",
+ "/third_party/libwebrtc/video/send_delay_stats.cc",
+ "/third_party/libwebrtc/video/send_statistics_proxy.cc",
+ "/third_party/libwebrtc/video/stats_counter.cc",
+ "/third_party/libwebrtc/video/stream_synchronization.cc",
+ "/third_party/libwebrtc/video/transport_adapter.cc",
+ "/third_party/libwebrtc/video/video_quality_observer2.cc",
+ "/third_party/libwebrtc/video/video_receive_stream2.cc",
+ "/third_party/libwebrtc/video/video_send_stream.cc",
+ "/third_party/libwebrtc/video/video_send_stream_impl.cc",
+ "/third_party/libwebrtc/video/video_stream_decoder2.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_gn")
diff --git a/third_party/libwebrtc/video/video_loopback.cc b/third_party/libwebrtc/video/video_loopback.cc
new file mode 100644
index 0000000000..ba0a0e5745
--- /dev/null
+++ b/third_party/libwebrtc/video/video_loopback.cc
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_loopback.h"
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/types/optional.h"
+#include "api/test/simulated_network.h"
+#include "api/test/video_quality_test_fixture.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/video_codecs/video_codec.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/run_test.h"
+#include "video/video_quality_test.h"
+
+// Flags common with screenshare loopback, with different default values.
+ABSL_FLAG(int, width, 640, "Video width.");
+
+ABSL_FLAG(int, height, 480, "Video height.");
+
+ABSL_FLAG(int, fps, 30, "Frames per second.");
+
+ABSL_FLAG(int, capture_device_index, 0, "Capture device to select");
+
+ABSL_FLAG(int, min_bitrate, 50, "Call and stream min bitrate in kbps.");
+
+ABSL_FLAG(int, start_bitrate, 300, "Call start bitrate in kbps.");
+
+ABSL_FLAG(int, target_bitrate, 800, "Stream target bitrate in kbps.");
+
+ABSL_FLAG(int, max_bitrate, 800, "Call and stream max bitrate in kbps.");
+
+ABSL_FLAG(bool,
+ suspend_below_min_bitrate,
+ false,
+ "Suspends video below the configured min bitrate.");
+
+ABSL_FLAG(int,
+ num_temporal_layers,
+ 1,
+ "Number of temporal layers. Set to 1-4 to override.");
+
+ABSL_FLAG(int,
+ inter_layer_pred,
+ 2,
+ "Inter-layer prediction mode. "
+ "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");
+
+// Flags common with screenshare loopback, with equal default values.
+ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");
+
+ABSL_FLAG(int,
+ selected_tl,
+ -1,
+ "Temporal layer to show or analyze. -1 to disable filtering.");
+
+ABSL_FLAG(
+ int,
+ duration,
+ 0,
+ "Duration of the test in seconds. If 0, rendered will be shown instead.");
+
+ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");
+
+ABSL_FLAG(std::string,
+ graph_title,
+ "",
+ "If empty, title will be generated automatically.");
+
+ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");
+
+ABSL_FLAG(int,
+ avg_burst_loss_length,
+ -1,
+ "Average burst length of lost packets.");
+
+ABSL_FLAG(int,
+ link_capacity,
+ 0,
+ "Capacity (kbps) of the fake link. 0 means infinite.");
+
+ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");
+
+ABSL_FLAG(int,
+ avg_propagation_delay_ms,
+ 0,
+ "Average link propagation delay in ms.");
+
+ABSL_FLAG(std::string,
+ rtc_event_log_name,
+ "",
+ "Filename for rtc event log. Two files "
+ "with \"_send\" and \"_recv\" suffixes will be created.");
+
+ABSL_FLAG(std::string,
+ rtp_dump_name,
+ "",
+ "Filename for dumped received RTP stream.");
+
+ABSL_FLAG(int,
+ std_propagation_delay_ms,
+ 0,
+ "Link propagation delay standard deviation in ms.");
+
+ABSL_FLAG(int, num_streams, 0, "Number of streams to show or analyze.");
+
+ABSL_FLAG(int,
+ selected_stream,
+ 0,
+ "ID of the stream to show or analyze. "
+ "Set to the number of streams to show them all.");
+
+ABSL_FLAG(int, num_spatial_layers, 1, "Number of spatial layers to use.");
+
+ABSL_FLAG(int,
+ selected_sl,
+ -1,
+ "Spatial layer to show or analyze. -1 to disable filtering.");
+
+ABSL_FLAG(std::string,
+ stream0,
+ "",
+ "Comma separated values describing VideoStream for stream #0.");
+
+ABSL_FLAG(std::string,
+ stream1,
+ "",
+ "Comma separated values describing VideoStream for stream #1.");
+
+ABSL_FLAG(std::string,
+ sl0,
+ "",
+ "Comma separated values describing SpatialLayer for layer #0.");
+
+ABSL_FLAG(std::string,
+ sl1,
+ "",
+ "Comma separated values describing SpatialLayer for layer #1.");
+
+ABSL_FLAG(std::string,
+ sl2,
+ "",
+ "Comma separated values describing SpatialLayer for layer #2.");
+
+ABSL_FLAG(std::string,
+ encoded_frame_path,
+ "",
+ "The base path for encoded frame logs. Created files will have "
+ "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
+
+ABSL_FLAG(bool, logs, false, "Print logs to stderr.");
+
+ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation");
+
+ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");
+
+ABSL_FLAG(bool, dependency_descriptor, false, "Use the dependency descriptor.");
+
+ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur");
+
+ABSL_FLAG(bool, use_ulpfec, false, "Use RED+ULPFEC forward error correction.");
+
+ABSL_FLAG(bool, use_flexfec, false, "Use FlexFEC forward error correction.");
+
+ABSL_FLAG(bool, audio, false, "Add audio stream");
+
+ABSL_FLAG(bool,
+ use_real_adm,
+ false,
+ "Use real ADM instead of fake (no effect if audio is false)");
+
+ABSL_FLAG(bool,
+ audio_video_sync,
+ false,
+ "Sync audio and video stream (no effect if"
+ " audio is false)");
+
+ABSL_FLAG(bool,
+ audio_dtx,
+ false,
+ "Enable audio DTX (no effect if audio is false)");
+
+ABSL_FLAG(bool, video, true, "Add video stream");
+
+ABSL_FLAG(
+ std::string,
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enabled/"
+ " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
+ "trials are separated by \"/\"");
+
+// Video-specific flags.
+ABSL_FLAG(std::string,
+ clip,
+ "",
+ "Name of the clip to show. If empty, using chroma generator.");
+
+namespace webrtc {
+namespace {
+
+size_t Width() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_width));
+}
+
+size_t Height() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_height));
+}
+
+int Fps() {
+ return absl::GetFlag(FLAGS_fps);
+}
+
+size_t GetCaptureDevice() {
+ return static_cast<size_t>(absl::GetFlag(FLAGS_capture_device_index));
+}
+
+int MinBitrateKbps() {
+ return absl::GetFlag(FLAGS_min_bitrate);
+}
+
+int StartBitrateKbps() {
+ return absl::GetFlag(FLAGS_start_bitrate);
+}
+
+int TargetBitrateKbps() {
+ return absl::GetFlag(FLAGS_target_bitrate);
+}
+
+int MaxBitrateKbps() {
+ return absl::GetFlag(FLAGS_max_bitrate);
+}
+
+int NumTemporalLayers() {
+ return absl::GetFlag(FLAGS_num_temporal_layers);
+}
+
+InterLayerPredMode InterLayerPred() {
+ if (absl::GetFlag(FLAGS_inter_layer_pred) == 0) {
+ return InterLayerPredMode::kOn;
+ } else if (absl::GetFlag(FLAGS_inter_layer_pred) == 1) {
+ return InterLayerPredMode::kOff;
+ } else {
+ RTC_DCHECK_EQ(absl::GetFlag(FLAGS_inter_layer_pred), 2);
+ return InterLayerPredMode::kOnKeyPic;
+ }
+}
+
+std::string Codec() {
+ return absl::GetFlag(FLAGS_codec);
+}
+
+int SelectedTL() {
+ return absl::GetFlag(FLAGS_selected_tl);
+}
+
+int DurationSecs() {
+ return absl::GetFlag(FLAGS_duration);
+}
+
+std::string OutputFilename() {
+ return absl::GetFlag(FLAGS_output_filename);
+}
+
+std::string GraphTitle() {
+ return absl::GetFlag(FLAGS_graph_title);
+}
+
+int LossPercent() {
+ return static_cast<int>(absl::GetFlag(FLAGS_loss_percent));
+}
+
+int AvgBurstLossLength() {
+ return static_cast<int>(absl::GetFlag(FLAGS_avg_burst_loss_length));
+}
+
+int LinkCapacityKbps() {
+ return static_cast<int>(absl::GetFlag(FLAGS_link_capacity));
+}
+
+int QueueSize() {
+ return static_cast<int>(absl::GetFlag(FLAGS_queue_size));
+}
+
+int AvgPropagationDelayMs() {
+ return static_cast<int>(absl::GetFlag(FLAGS_avg_propagation_delay_ms));
+}
+
+std::string RtcEventLogName() {
+ return absl::GetFlag(FLAGS_rtc_event_log_name);
+}
+
+std::string RtpDumpName() {
+ return absl::GetFlag(FLAGS_rtp_dump_name);
+}
+
+int StdPropagationDelayMs() {
+ return absl::GetFlag(FLAGS_std_propagation_delay_ms);
+}
+
+int NumStreams() {
+ return absl::GetFlag(FLAGS_num_streams);
+}
+
+int SelectedStream() {
+ return absl::GetFlag(FLAGS_selected_stream);
+}
+
+int NumSpatialLayers() {
+ return absl::GetFlag(FLAGS_num_spatial_layers);
+}
+
+int SelectedSL() {
+ return absl::GetFlag(FLAGS_selected_sl);
+}
+
+std::string Stream0() {
+ return absl::GetFlag(FLAGS_stream0);
+}
+
+std::string Stream1() {
+ return absl::GetFlag(FLAGS_stream1);
+}
+
+std::string SL0() {
+ return absl::GetFlag(FLAGS_sl0);
+}
+
+std::string SL1() {
+ return absl::GetFlag(FLAGS_sl1);
+}
+
+std::string SL2() {
+ return absl::GetFlag(FLAGS_sl2);
+}
+
+std::string EncodedFramePath() {
+ return absl::GetFlag(FLAGS_encoded_frame_path);
+}
+
+std::string Clip() {
+ return absl::GetFlag(FLAGS_clip);
+}
+
+} // namespace
+
+void Loopback() {
+ BuiltInNetworkBehaviorConfig pipe_config;
+ pipe_config.loss_percent = LossPercent();
+ pipe_config.avg_burst_loss_length = AvgBurstLossLength();
+ pipe_config.link_capacity_kbps = LinkCapacityKbps();
+ pipe_config.queue_length_packets = QueueSize();
+ pipe_config.queue_delay_ms = AvgPropagationDelayMs();
+ pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
+ pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);
+
+ BitrateConstraints call_bitrate_config;
+ call_bitrate_config.min_bitrate_bps = MinBitrateKbps() * 1000;
+ call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
+ call_bitrate_config.max_bitrate_bps = -1; // Don't cap bandwidth estimate.
+
+ VideoQualityTest::Params params;
+ params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe);
+ params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor);
+ params.call.dependency_descriptor =
+ absl::GetFlag(FLAGS_dependency_descriptor);
+ params.call.call_bitrate_config = call_bitrate_config;
+
+ params.video[0].enabled = absl::GetFlag(FLAGS_video);
+ params.video[0].width = Width();
+ params.video[0].height = Height();
+ params.video[0].fps = Fps();
+ params.video[0].min_bitrate_bps = MinBitrateKbps() * 1000;
+ params.video[0].target_bitrate_bps = TargetBitrateKbps() * 1000;
+ params.video[0].max_bitrate_bps = MaxBitrateKbps() * 1000;
+ params.video[0].suspend_below_min_bitrate =
+ absl::GetFlag(FLAGS_suspend_below_min_bitrate);
+ params.video[0].codec = Codec();
+ params.video[0].num_temporal_layers = NumTemporalLayers();
+ params.video[0].selected_tl = SelectedTL();
+ params.video[0].min_transmit_bps = 0;
+ params.video[0].ulpfec = absl::GetFlag(FLAGS_use_ulpfec);
+ params.video[0].flexfec = absl::GetFlag(FLAGS_use_flexfec);
+ params.video[0].automatic_scaling = NumStreams() < 2;
+ params.video[0].clip_path = Clip();
+ params.video[0].capture_device_index = GetCaptureDevice();
+ params.audio.enabled = absl::GetFlag(FLAGS_audio);
+ params.audio.sync_video = absl::GetFlag(FLAGS_audio_video_sync);
+ params.audio.dtx = absl::GetFlag(FLAGS_audio_dtx);
+ params.audio.use_real_adm = absl::GetFlag(FLAGS_use_real_adm);
+ params.logging.rtc_event_log_name = RtcEventLogName();
+ params.logging.rtp_dump_name = RtpDumpName();
+ params.logging.encoded_frame_base_path = EncodedFramePath();
+ params.screenshare[0].enabled = false;
+ params.analyzer.test_label = "video";
+ params.analyzer.test_durations_secs = DurationSecs();
+ params.analyzer.graph_data_output_filename = OutputFilename();
+ params.analyzer.graph_title = GraphTitle();
+ params.config = pipe_config;
+
+ if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) {
+ params.ss[0].infer_streams = true;
+ }
+
+ std::vector<std::string> stream_descriptors;
+ stream_descriptors.push_back(Stream0());
+ stream_descriptors.push_back(Stream1());
+ std::vector<std::string> SL_descriptors;
+ SL_descriptors.push_back(SL0());
+ SL_descriptors.push_back(SL1());
+ SL_descriptors.push_back(SL2());
+ VideoQualityTest::FillScalabilitySettings(
+ &params, 0, stream_descriptors, NumStreams(), SelectedStream(),
+ NumSpatialLayers(), SelectedSL(), InterLayerPred(), SL_descriptors);
+
+ auto fixture = std::make_unique<VideoQualityTest>(nullptr);
+ if (DurationSecs()) {
+ fixture->RunWithAnalyzer(params);
+ } else {
+ fixture->RunWithRenderers(params);
+ }
+}
+
+int RunLoopbackTest(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ absl::ParseCommandLine(argc, argv);
+
+ rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));
+
+ // InitFieldTrialsFromString stores the char*, so the char array must outlive
+ // the application.
+ const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
+ webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());
+
+ webrtc::test::RunTest(webrtc::Loopback);
+ return 0;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_loopback.h b/third_party/libwebrtc/video/video_loopback.h
new file mode 100644
index 0000000000..51c7707640
--- /dev/null
+++ b/third_party/libwebrtc/video/video_loopback.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_LOOPBACK_H_
+#define VIDEO_VIDEO_LOOPBACK_H_
+
+namespace webrtc {
+// Expose the main test method.
+int RunLoopbackTest(int argc, char* argv[]);
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_LOOPBACK_H_
diff --git a/third_party/libwebrtc/video/video_loopback_main.cc b/third_party/libwebrtc/video/video_loopback_main.cc
new file mode 100644
index 0000000000..f4e5cdd8a5
--- /dev/null
+++ b/third_party/libwebrtc/video/video_loopback_main.cc
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_loopback.h"
+
+int main(int argc, char* argv[]) {
+  return webrtc::RunLoopbackTest(argc, argv);
+}
diff --git a/third_party/libwebrtc/video/video_loopback_main.mm b/third_party/libwebrtc/video/video_loopback_main.mm
new file mode 100644
index 0000000000..61b47a54da
--- /dev/null
+++ b/third_party/libwebrtc/video/video_loopback_main.mm
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "video/video_loopback.h"
+
+int main(int argc, char* argv[]) {
+  @autoreleasepool {
+    return webrtc::RunLoopbackTest(argc, argv);
+  }
+}
diff --git a/third_party/libwebrtc/video/video_quality_observer2.cc b/third_party/libwebrtc/video/video_quality_observer2.cc
new file mode 100644
index 0000000000..0afc2f5235
--- /dev/null
+++ b/third_party/libwebrtc/video/video_quality_observer2.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_quality_observer2.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <string>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/metrics.h"
+#include "video/video_receive_stream2.h"
+
+namespace webrtc {
+namespace internal {
+const uint32_t VideoQualityObserver::kMinFrameSamplesToDetectFreeze = 5;
+const uint32_t VideoQualityObserver::kMinIncreaseForFreezeMs = 150;
+const uint32_t VideoQualityObserver::kAvgInterframeDelaysWindowSizeFrames = 30;
+
+namespace {
+constexpr int kMinVideoDurationMs = 3000;
+constexpr int kMinRequiredSamples = 1;
+constexpr int kPixelsInHighResolution =
+ 960 * 540; // CPU-adapted HD still counts.
+constexpr int kPixelsInMediumResolution = 640 * 360;
+constexpr int kBlockyQpThresholdVp8 = 70;
+constexpr int kBlockyQpThresholdVp9 = 180;
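+// Note: the thresholds are read against each codec's own QP scale (VP8 uses a
+// 0-127 quantizer index, VP9 uses 0-255), so both values sit in the upper
+// part of their respective ranges.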
+constexpr int kMaxNumCachedBlockyFrames = 100;
+// TODO(ilnik): Add H264/HEVC thresholds.
+} // namespace
+
+VideoQualityObserver::VideoQualityObserver()
+ : last_frame_rendered_ms_(-1),
+ num_frames_rendered_(0),
+ first_frame_rendered_ms_(-1),
+ last_frame_pixels_(0),
+ is_last_frame_blocky_(false),
+ last_unfreeze_time_ms_(0),
+ render_interframe_delays_(kAvgInterframeDelaysWindowSizeFrames),
+ sum_squared_interframe_delays_secs_(0.0),
+ time_in_resolution_ms_(3, 0),
+ current_resolution_(Resolution::Low),
+ num_resolution_downgrades_(0),
+ time_in_blocky_video_ms_(0),
+ is_paused_(false) {}
+
+void VideoQualityObserver::UpdateHistograms(bool screenshare) {
+ // TODO(bugs.webrtc.org/11489): Called on the decoder thread - which _might_
+ // be the same as the construction thread.
+
+ // Don't report anything on an empty video stream.
+ if (num_frames_rendered_ == 0) {
+ return;
+ }
+
+ char log_stream_buf[2 * 1024];
+ rtc::SimpleStringBuilder log_stream(log_stream_buf);
+
+ if (last_frame_rendered_ms_ > last_unfreeze_time_ms_) {
+ smooth_playback_durations_.Add(last_frame_rendered_ms_ -
+ last_unfreeze_time_ms_);
+ }
+
+ std::string uma_prefix =
+ screenshare ? "WebRTC.Video.Screenshare" : "WebRTC.Video";
+
+ auto mean_time_between_freezes =
+ smooth_playback_durations_.Avg(kMinRequiredSamples);
+ if (mean_time_between_freezes) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(uma_prefix + ".MeanTimeBetweenFreezesMs",
+ *mean_time_between_freezes);
+ log_stream << uma_prefix << ".MeanTimeBetweenFreezesMs "
+ << *mean_time_between_freezes << "\n";
+ }
+ auto avg_freeze_length = freezes_durations_.Avg(kMinRequiredSamples);
+ if (avg_freeze_length) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(uma_prefix + ".MeanFreezeDurationMs",
+ *avg_freeze_length);
+ log_stream << uma_prefix << ".MeanFreezeDurationMs " << *avg_freeze_length
+ << "\n";
+ }
+
+ int64_t video_duration_ms =
+ last_frame_rendered_ms_ - first_frame_rendered_ms_;
+
+ if (video_duration_ms >= kMinVideoDurationMs) {
+ int time_spent_in_hd_percentage = static_cast<int>(
+ time_in_resolution_ms_[Resolution::High] * 100 / video_duration_ms);
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".TimeInHdPercentage",
+ time_spent_in_hd_percentage);
+ log_stream << uma_prefix << ".TimeInHdPercentage "
+ << time_spent_in_hd_percentage << "\n";
+
+ int time_with_blocky_video_percentage =
+ static_cast<int>(time_in_blocky_video_ms_ * 100 / video_duration_ms);
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".TimeInBlockyVideoPercentage",
+ time_with_blocky_video_percentage);
+ log_stream << uma_prefix << ".TimeInBlockyVideoPercentage "
+ << time_with_blocky_video_percentage << "\n";
+
+ int num_resolution_downgrades_per_minute =
+ num_resolution_downgrades_ * 60000 / video_duration_ms;
+ if (!screenshare) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(
+ uma_prefix + ".NumberResolutionDownswitchesPerMinute",
+ num_resolution_downgrades_per_minute);
+ log_stream << uma_prefix << ".NumberResolutionDownswitchesPerMinute "
+ << num_resolution_downgrades_per_minute << "\n";
+ }
+
+ int num_freezes_per_minute =
+ freezes_durations_.NumSamples() * 60000 / video_duration_ms;
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".NumberFreezesPerMinute",
+ num_freezes_per_minute);
+ log_stream << uma_prefix << ".NumberFreezesPerMinute "
+ << num_freezes_per_minute << "\n";
+
+ if (sum_squared_interframe_delays_secs_ > 0.0) {
+ int harmonic_framerate_fps = std::round(
+ video_duration_ms / (1000 * sum_squared_interframe_delays_secs_));
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".HarmonicFrameRate",
+ harmonic_framerate_fps);
+ log_stream << uma_prefix << ".HarmonicFrameRate "
+ << harmonic_framerate_fps << "\n";
+ }
+ }
+ RTC_LOG(LS_INFO) << log_stream.str();
+}
+
+void VideoQualityObserver::OnRenderedFrame(
+ const VideoFrameMetaData& frame_meta) {
+ RTC_DCHECK_LE(last_frame_rendered_ms_, frame_meta.decode_timestamp.ms());
+ RTC_DCHECK_LE(last_unfreeze_time_ms_, frame_meta.decode_timestamp.ms());
+
+ if (num_frames_rendered_ == 0) {
+ first_frame_rendered_ms_ = last_unfreeze_time_ms_ =
+ frame_meta.decode_timestamp.ms();
+ }
+
+ auto blocky_frame_it = blocky_frames_.find(frame_meta.rtp_timestamp);
+
+ if (num_frames_rendered_ > 0) {
+ // Process inter-frame delay.
+ const int64_t interframe_delay_ms =
+ frame_meta.decode_timestamp.ms() - last_frame_rendered_ms_;
+ const double interframe_delays_secs = interframe_delay_ms / 1000.0;
+
+    // The sum of squared inter-frame intervals is used to calculate the
+    // harmonic frame rate metric. The metric aims to reflect the overall
+    // smoothness of playback and accounts for both freezes and pauses.
+ sum_squared_interframe_delays_secs_ +=
+ interframe_delays_secs * interframe_delays_secs;
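+    // For intuition (illustrative numbers, not part of the metric): a steady
+    // 30 fps over 10 s gives a sum of ~300 * (1/30)^2 ~= 0.33 s^2 and a
+    // harmonic rate of 10 / 0.33 ~= 30 fps; replacing 2 s of that with one
+    // freeze adds 2^2 = 4 s^2 and drops the rate to ~10 / 4.3 ~= 2 fps, so
+    // long gaps dominate the metric.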
+
+ if (!is_paused_) {
+ render_interframe_delays_.AddSample(interframe_delay_ms);
+
+ bool was_freeze = false;
+ if (render_interframe_delays_.Size() >= kMinFrameSamplesToDetectFreeze) {
+ const absl::optional<int64_t> avg_interframe_delay =
+ render_interframe_delays_.GetAverageRoundedDown();
+ RTC_DCHECK(avg_interframe_delay);
+ was_freeze = interframe_delay_ms >=
+ std::max(3 * *avg_interframe_delay,
+ *avg_interframe_delay + kMinIncreaseForFreezeMs);
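+        // E.g. with a 33 ms average delay (30 fps) the freeze threshold is
+        // max(99, 33 + 150) = 183 ms; with a 100 ms average (10 fps) it is
+        // max(300, 250) = 300 ms. (Illustrative numbers only.)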
+ }
+
+ if (was_freeze) {
+ freezes_durations_.Add(interframe_delay_ms);
+ smooth_playback_durations_.Add(last_frame_rendered_ms_ -
+ last_unfreeze_time_ms_);
+ last_unfreeze_time_ms_ = frame_meta.decode_timestamp.ms();
+ } else {
+        // Count spatial metrics only if there was no freeze.
+ time_in_resolution_ms_[current_resolution_] += interframe_delay_ms;
+
+ if (is_last_frame_blocky_) {
+ time_in_blocky_video_ms_ += interframe_delay_ms;
+ }
+ }
+ }
+ }
+
+ if (is_paused_) {
+ // If the stream was paused since the previous frame, do not count the
+ // pause toward smooth playback. Explicitly count the part before it and
+ // start the new smooth playback interval from this frame.
+ is_paused_ = false;
+ if (last_frame_rendered_ms_ > last_unfreeze_time_ms_) {
+ smooth_playback_durations_.Add(last_frame_rendered_ms_ -
+ last_unfreeze_time_ms_);
+ }
+ last_unfreeze_time_ms_ = frame_meta.decode_timestamp.ms();
+
+ if (num_frames_rendered_ > 0) {
+ pauses_durations_.Add(frame_meta.decode_timestamp.ms() -
+ last_frame_rendered_ms_);
+ }
+ }
+
+ int64_t pixels = frame_meta.width * frame_meta.height;
+ if (pixels >= kPixelsInHighResolution) {
+ current_resolution_ = Resolution::High;
+ } else if (pixels >= kPixelsInMediumResolution) {
+ current_resolution_ = Resolution::Medium;
+ } else {
+ current_resolution_ = Resolution::Low;
+ }
+
+ if (pixels < last_frame_pixels_) {
+ ++num_resolution_downgrades_;
+ }
+
+ last_frame_pixels_ = pixels;
+ last_frame_rendered_ms_ = frame_meta.decode_timestamp.ms();
+
+ is_last_frame_blocky_ = blocky_frame_it != blocky_frames_.end();
+ if (is_last_frame_blocky_) {
+ blocky_frames_.erase(blocky_frames_.begin(), ++blocky_frame_it);
+ }
+
+ ++num_frames_rendered_;
+}
+
+void VideoQualityObserver::OnDecodedFrame(uint32_t rtp_frame_timestamp,
+ absl::optional<uint8_t> qp,
+ VideoCodecType codec) {
+ if (!qp)
+ return;
+
+ absl::optional<int> qp_blocky_threshold;
+ // TODO(ilnik): add other codec types when we have QP for them.
+ switch (codec) {
+ case kVideoCodecVP8:
+ qp_blocky_threshold = kBlockyQpThresholdVp8;
+ break;
+ case kVideoCodecVP9:
+ qp_blocky_threshold = kBlockyQpThresholdVp9;
+ break;
+ default:
+ qp_blocky_threshold = absl::nullopt;
+ }
+
+ RTC_DCHECK(blocky_frames_.find(rtp_frame_timestamp) == blocky_frames_.end());
+
+ if (qp_blocky_threshold && *qp > *qp_blocky_threshold) {
+    // Cache the blocky frame; its duration is computed in OnRenderedFrame().
+ if (blocky_frames_.size() > kMaxNumCachedBlockyFrames) {
+ RTC_LOG(LS_WARNING) << "Overflow of blocky frames cache.";
+ blocky_frames_.erase(
+ blocky_frames_.begin(),
+ std::next(blocky_frames_.begin(), kMaxNumCachedBlockyFrames / 2));
+ }
+
+ blocky_frames_.insert(rtp_frame_timestamp);
+ }
+}
+
+void VideoQualityObserver::OnStreamInactive() {
+ is_paused_ = true;
+}
+
+uint32_t VideoQualityObserver::NumFreezes() const {
+ return freezes_durations_.NumSamples();
+}
+
+uint32_t VideoQualityObserver::NumPauses() const {
+ return pauses_durations_.NumSamples();
+}
+
+uint32_t VideoQualityObserver::TotalFreezesDurationMs() const {
+ return freezes_durations_.Sum(kMinRequiredSamples).value_or(0);
+}
+
+uint32_t VideoQualityObserver::TotalPausesDurationMs() const {
+ return pauses_durations_.Sum(kMinRequiredSamples).value_or(0);
+}
+
+uint32_t VideoQualityObserver::TotalFramesDurationMs() const {
+ return last_frame_rendered_ms_ - first_frame_rendered_ms_;
+}
+
+double VideoQualityObserver::SumSquaredFrameDurationsSec() const {
+ return sum_squared_interframe_delays_secs_;
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_quality_observer2.h b/third_party/libwebrtc/video/video_quality_observer2.h
new file mode 100644
index 0000000000..35877858d4
--- /dev/null
+++ b/third_party/libwebrtc/video/video_quality_observer2.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_QUALITY_OBSERVER2_H_
+#define VIDEO_VIDEO_QUALITY_OBSERVER2_H_
+
+#include <stdint.h>
+
+#include <set>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video/video_codec_type.h"
+#include "api/video/video_content_type.h"
+#include "rtc_base/numerics/moving_average.h"
+#include "rtc_base/numerics/sample_counter.h"
+
+namespace webrtc {
+namespace internal {
+// Declared in video_receive_stream2.h.
+struct VideoFrameMetaData;
+
+// Calculates spatial and temporal quality metrics and reports them to UMA
+// stats.
+class VideoQualityObserver {
+ public:
+  // Blocky-frame detection applies kBlockyQpThresholdVp8 or
+  // kBlockyQpThresholdVp9 internally, depending on the codec passed to
+  // OnDecodedFrame().
+ VideoQualityObserver();
+ ~VideoQualityObserver() = default;
+
+ void OnDecodedFrame(uint32_t rtp_frame_timestamp,
+ absl::optional<uint8_t> qp,
+ VideoCodecType codec);
+
+ void OnRenderedFrame(const VideoFrameMetaData& frame_meta);
+
+ void OnStreamInactive();
+
+ uint32_t NumFreezes() const;
+ uint32_t NumPauses() const;
+ uint32_t TotalFreezesDurationMs() const;
+ uint32_t TotalPausesDurationMs() const;
+ uint32_t TotalFramesDurationMs() const;
+ double SumSquaredFrameDurationsSec() const;
+
+ // Set `screenshare` to true if the last decoded frame was for screenshare.
+ void UpdateHistograms(bool screenshare);
+
+ static const uint32_t kMinFrameSamplesToDetectFreeze;
+ static const uint32_t kMinIncreaseForFreezeMs;
+ static const uint32_t kAvgInterframeDelaysWindowSizeFrames;
+
+ private:
+ enum Resolution {
+ Low = 0,
+ Medium = 1,
+ High = 2,
+ };
+
+ int64_t last_frame_rendered_ms_;
+ int64_t num_frames_rendered_;
+ int64_t first_frame_rendered_ms_;
+ int64_t last_frame_pixels_;
+ bool is_last_frame_blocky_;
+  // Decode timestamp of the last frame that ended a freeze or pause.
+ int64_t last_unfreeze_time_ms_;
+ rtc::MovingAverage render_interframe_delays_;
+ double sum_squared_interframe_delays_secs_;
+  // An inter-frame delay is counted as a freeze if it is at least
+  // max(3 * avg, avg + kMinIncreaseForFreezeMs), where avg is the windowed
+  // average inter-frame delay.
+ rtc::SampleCounter freezes_durations_;
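+  // Gaps during which the stream was explicitly inactive; see
+  // OnStreamInactive().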
+ rtc::SampleCounter pauses_durations_;
+ // Time between freezes.
+ rtc::SampleCounter smooth_playback_durations_;
+  // Counters for time spent in each resolution. The time between two
+  // consecutive frames is credited to the bin corresponding to the first
+  // frame's resolution.
+ std::vector<int64_t> time_in_resolution_ms_;
+  // Resolution of the last rendered frame; the Resolution enum doubles as an
+  // index into time_in_resolution_ms_.
+ Resolution current_resolution_;
+ int num_resolution_downgrades_;
+  // Like the resolution counters: time spent in high-QP (blocky) video.
+ int64_t time_in_blocky_video_ms_;
+ bool is_paused_;
+
+  // RTP timestamps of decoded frames with a high QP value.
+ std::set<int64_t> blocky_frames_;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_QUALITY_OBSERVER2_H_
diff --git a/third_party/libwebrtc/video/video_quality_test.cc b/third_party/libwebrtc/video/video_quality_test.cc
new file mode 100644
index 0000000000..5d179178f2
--- /dev/null
+++ b/third_party/libwebrtc/video/video_quality_test.cc
@@ -0,0 +1,1577 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_quality_test.h"
+
+#include <stdio.h>
+
+#if defined(WEBRTC_WIN)
+#include <conio.h>
+#endif
+
+#include <algorithm>
+#include <deque>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/rtc_event_log_output_file.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/create_frame_generator.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "media/base/media_constants.h"
+#include "media/engine/adm_helpers.h"
+#include "media/engine/encoder_simulcast_proxy.h"
+#include "media/engine/fake_video_codec_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/webrtc_video_engine.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/platform_video_capturer.h"
+#include "test/testsupport/file_utils.h"
+#include "test/video_renderer.h"
+#include "video/frame_dumping_decoder.h"
+#ifdef WEBRTC_WIN
+#include "modules/audio_device/include/audio_device_factory.h"
+#endif
+#include "video/config/encoder_stream_factory.h"
+
+namespace webrtc {
+
+namespace {
+enum : int { // The first valid value is 1.
+ kAbsSendTimeExtensionId = 1,
+ kGenericFrameDescriptorExtensionId00,
+ kGenericFrameDescriptorExtensionId01,
+ kTransportSequenceNumberExtensionId,
+ kVideoContentTypeExtensionId,
+ kVideoTimingExtensionId,
+};
+
+constexpr char kSyncGroup[] = "av_sync";
+constexpr int kOpusMinBitrateBps = 6000;
+constexpr int kOpusBitrateFbBps = 32000;
+constexpr int kFramesSentInQuickTest = 1;
+constexpr uint32_t kThumbnailSendSsrcStart = 0xE0000;
+constexpr uint32_t kThumbnailRtxSsrcStart = 0xF0000;
+
+constexpr int kDefaultMaxQp = cricket::WebRtcVideoChannel::kDefaultQpMax;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+
+std::pair<uint32_t, uint32_t> GetMinMaxBitratesBps(const VideoCodec& codec,
+ size_t spatial_idx) {
+ uint32_t min_bitrate = codec.minBitrate;
+ uint32_t max_bitrate = codec.maxBitrate;
+ if (spatial_idx < codec.numberOfSimulcastStreams) {
+ min_bitrate =
+ std::max(min_bitrate, codec.simulcastStream[spatial_idx].minBitrate);
+ max_bitrate =
+ std::min(max_bitrate, codec.simulcastStream[spatial_idx].maxBitrate);
+ }
+ if (codec.codecType == VideoCodecType::kVideoCodecVP9 &&
+ spatial_idx < codec.VP9().numberOfSpatialLayers) {
+ min_bitrate =
+ std::max(min_bitrate, codec.spatialLayers[spatial_idx].minBitrate);
+ max_bitrate =
+ std::min(max_bitrate, codec.spatialLayers[spatial_idx].maxBitrate);
+ }
+ max_bitrate = std::max(max_bitrate, min_bitrate);
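+  // Codec-level bitrates are given in kbps; convert to bps for the caller.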
+ return {min_bitrate * 1000, max_bitrate * 1000};
+}
+
+class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit VideoStreamFactory(const std::vector<VideoStream>& streams)
+ : streams_(streams) {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const VideoEncoderConfig& encoder_config) override {
+ // The highest layer must match the incoming resolution.
+ std::vector<VideoStream> streams = streams_;
+ streams[streams_.size() - 1].height = frame_height;
+ streams[streams_.size() - 1].width = frame_width;
+
+ streams[0].bitrate_priority = encoder_config.bitrate_priority;
+ return streams;
+ }
+
+ std::vector<VideoStream> streams_;
+};
+
+// This wrapper provides two features needed by the video quality tests:
+// 1. Invoke VideoAnalyzer callbacks before and after encoding each frame.
+// 2. Write the encoded frames to file, one file per simulcast layer.
+class QualityTestVideoEncoder : public VideoEncoder,
+ private EncodedImageCallback {
+ public:
+ QualityTestVideoEncoder(std::unique_ptr<VideoEncoder> encoder,
+ VideoAnalyzer* analyzer,
+ std::vector<FileWrapper> files,
+ double overshoot_factor)
+ : encoder_(std::move(encoder)),
+ overshoot_factor_(overshoot_factor),
+ analyzer_(analyzer) {
+ for (FileWrapper& file : files) {
+ writers_.push_back(
+ IvfFileWriter::Wrap(std::move(file), /* byte_limit= */ 100000000));
+ }
+ }
+
+ // Implement VideoEncoder
+ void SetFecControllerOverride(
+      FecControllerOverride* fec_controller_override) override {
+ // Ignored.
+ }
+
+ int32_t InitEncode(const VideoCodec* codec_settings,
+ const Settings& settings) override {
+ codec_settings_ = *codec_settings;
+ return encoder_->InitEncode(codec_settings, settings);
+ }
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override {
+ callback_ = callback;
+ return encoder_->RegisterEncodeCompleteCallback(this);
+ }
+
+ int32_t Release() override { return encoder_->Release(); }
+
+ int32_t Encode(const VideoFrame& frame,
+                 const std::vector<VideoFrameType>* frame_types) override {
+ if (analyzer_) {
+ analyzer_->PreEncodeOnFrame(frame);
+ }
+ return encoder_->Encode(frame, frame_types);
+ }
+
+ void SetRates(const RateControlParameters& parameters) override {
+ RTC_DCHECK_GT(overshoot_factor_, 0.0);
+ if (overshoot_factor_ == 1.0) {
+ encoder_->SetRates(parameters);
+ return;
+ }
+
+    // Simulate the encoder overshooting the target bitrate by configuring the
+    // actual encoder too high. Take care not to adjust past the limits of the
+    // config; otherwise encoders may crash on a DCHECK.
+ VideoBitrateAllocation overshot_allocation;
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ const uint32_t spatial_layer_bitrate_bps =
+ parameters.bitrate.GetSpatialLayerSum(si);
+ if (spatial_layer_bitrate_bps == 0) {
+ continue;
+ }
+
+ uint32_t min_bitrate_bps;
+ uint32_t max_bitrate_bps;
+ std::tie(min_bitrate_bps, max_bitrate_bps) =
+ GetMinMaxBitratesBps(codec_settings_, si);
+ double overshoot_factor = overshoot_factor_;
+ const uint32_t corrected_bitrate = rtc::checked_cast<uint32_t>(
+ overshoot_factor * spatial_layer_bitrate_bps);
+      if (corrected_bitrate < min_bitrate_bps) {
+        overshoot_factor =
+            static_cast<double>(min_bitrate_bps) / spatial_layer_bitrate_bps;
+      } else if (corrected_bitrate > max_bitrate_bps) {
+        overshoot_factor =
+            static_cast<double>(max_bitrate_bps) / spatial_layer_bitrate_bps;
+      }
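+      // E.g. with overshoot_factor_ = 1.2, a 1000 kbps layer and an
+      // 1100 kbps max, the corrected 1200 kbps exceeds the max, so the
+      // factor is clamped to 1100 / 1000 = 1.1. (Illustrative numbers.)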
+
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (parameters.bitrate.HasBitrate(si, ti)) {
+ overshot_allocation.SetBitrate(
+ si, ti,
+ rtc::checked_cast<uint32_t>(
+ overshoot_factor * parameters.bitrate.GetBitrate(si, ti)));
+ }
+ }
+ }
+
+ return encoder_->SetRates(
+ RateControlParameters(overshot_allocation, parameters.framerate_fps,
+ parameters.bandwidth_allocation));
+ }
+
+ void OnPacketLossRateUpdate(float packet_loss_rate) override {
+ encoder_->OnPacketLossRateUpdate(packet_loss_rate);
+ }
+
+ void OnRttUpdate(int64_t rtt_ms) override { encoder_->OnRttUpdate(rtt_ms); }
+
+ void OnLossNotification(const LossNotification& loss_notification) override {
+ encoder_->OnLossNotification(loss_notification);
+ }
+
+ EncoderInfo GetEncoderInfo() const override {
+ EncoderInfo info = encoder_->GetEncoderInfo();
+ if (overshoot_factor_ != 1.0) {
+      // We're simulating a bad encoder; don't forward the trusted rate
+      // controller setting from e.g. libvpx.
+ info.has_trusted_rate_controller = false;
+ }
+ return info;
+ }
+
+ private:
+ // Implement EncodedImageCallback
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ if (codec_specific_info) {
+ int simulcast_index;
+ if (codec_specific_info->codecType == kVideoCodecVP9) {
+ simulcast_index = 0;
+ } else {
+ simulcast_index = encoded_image.SpatialIndex().value_or(0);
+ }
+ RTC_DCHECK_GE(simulcast_index, 0);
+ if (analyzer_) {
+ analyzer_->PostEncodeOnFrame(simulcast_index,
+ encoded_image.Timestamp());
+ }
+ if (static_cast<size_t>(simulcast_index) < writers_.size()) {
+ writers_[simulcast_index]->WriteFrame(encoded_image,
+ codec_specific_info->codecType);
+ }
+ }
+
+ return callback_->OnEncodedImage(encoded_image, codec_specific_info);
+ }
+
+ void OnDroppedFrame(DropReason reason) override {
+ callback_->OnDroppedFrame(reason);
+ }
+
+ const std::unique_ptr<VideoEncoder> encoder_;
+ const double overshoot_factor_;
+ VideoAnalyzer* const analyzer_;
+ std::vector<std::unique_ptr<IvfFileWriter>> writers_;
+ EncodedImageCallback* callback_ = nullptr;
+ VideoCodec codec_settings_;
+};
+
+#if defined(WEBRTC_WIN) && !defined(WINUWP)
+void PressEnterToContinue(TaskQueueBase* task_queue) {
+ puts(">> Press ENTER to continue...");
+
+ while (!_kbhit() || _getch() != '\r') {
+ // Drive the message loop for the thread running the task_queue
+ SendTask(task_queue, [&]() {
+ MSG msg;
+ if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ }
+ });
+ }
+}
+#else
+void PressEnterToContinue(TaskQueueBase* /*task_queue*/) {
+ puts(">> Press ENTER to continue...");
+ while (getc(stdin) != '\n' && !feof(stdin))
+ ; // NOLINT
+}
+#endif
+
+} // namespace
+
+std::unique_ptr<VideoDecoder> VideoQualityTest::CreateVideoDecoder(
+ const SdpVideoFormat& format) {
+ std::unique_ptr<VideoDecoder> decoder;
+ if (format.name == "multiplex") {
+ decoder = std::make_unique<MultiplexDecoderAdapter>(
+ decoder_factory_.get(), SdpVideoFormat(cricket::kVp9CodecName));
+ } else if (format.name == "FakeCodec") {
+ decoder = webrtc::FakeVideoDecoderFactory::CreateVideoDecoder();
+ } else {
+ decoder = decoder_factory_->CreateVideoDecoder(format);
+ }
+ if (!params_.logging.encoded_frame_base_path.empty()) {
+ rtc::StringBuilder str;
+ str << receive_logs_++;
+ std::string path =
+ params_.logging.encoded_frame_base_path + "." + str.str() + ".recv.ivf";
+ decoder = CreateFrameDumpingDecoderWrapper(
+ std::move(decoder), FileWrapper::OpenWriteOnly(path));
+ }
+ return decoder;
+}
+
+std::unique_ptr<VideoEncoder> VideoQualityTest::CreateVideoEncoder(
+ const SdpVideoFormat& format,
+ VideoAnalyzer* analyzer) {
+ std::unique_ptr<VideoEncoder> encoder;
+ if (format.name == "VP8") {
+ encoder =
+ std::make_unique<EncoderSimulcastProxy>(encoder_factory_.get(), format);
+ } else if (format.name == "multiplex") {
+ encoder = std::make_unique<MultiplexEncoderAdapter>(
+ encoder_factory_.get(), SdpVideoFormat(cricket::kVp9CodecName));
+ } else if (format.name == "FakeCodec") {
+ encoder = webrtc::FakeVideoEncoderFactory::CreateVideoEncoder();
+ } else {
+ encoder = encoder_factory_->CreateVideoEncoder(format);
+ }
+
+ std::vector<FileWrapper> encoded_frame_dump_files;
+ if (!params_.logging.encoded_frame_base_path.empty()) {
+ char ss_buf[100];
+ rtc::SimpleStringBuilder sb(ss_buf);
+ sb << send_logs_++;
+ std::string prefix =
+ params_.logging.encoded_frame_base_path + "." + sb.str() + ".send.";
+ encoded_frame_dump_files.push_back(
+ FileWrapper::OpenWriteOnly(prefix + "1.ivf"));
+ encoded_frame_dump_files.push_back(
+ FileWrapper::OpenWriteOnly(prefix + "2.ivf"));
+ encoded_frame_dump_files.push_back(
+ FileWrapper::OpenWriteOnly(prefix + "3.ivf"));
+ }
+
+ double overshoot_factor = 1.0;
+  // Match the format to either of the streams in dual-stream mode in order to
+  // get the overshoot factor. This is not very robust, but from within the
+  // factory we can't know for sure which stream this encoder is meant for.
+ if (format ==
+ SdpVideoFormat(params_.video[0].codec, params_.video[0].sdp_params)) {
+ overshoot_factor = params_.video[0].encoder_overshoot_factor;
+ } else if (format == SdpVideoFormat(params_.video[1].codec,
+ params_.video[1].sdp_params)) {
+ overshoot_factor = params_.video[1].encoder_overshoot_factor;
+ }
+ if (overshoot_factor == 0.0) {
+ // If params were zero-initialized, set to 1.0 instead.
+ overshoot_factor = 1.0;
+ }
+
+ if (analyzer || !encoded_frame_dump_files.empty() || overshoot_factor > 1.0) {
+ encoder = std::make_unique<QualityTestVideoEncoder>(
+ std::move(encoder), analyzer, std::move(encoded_frame_dump_files),
+ overshoot_factor);
+ }
+
+ return encoder;
+}
+
+VideoQualityTest::VideoQualityTest(
+ std::unique_ptr<InjectionComponents> injection_components)
+ : clock_(Clock::GetRealTimeClock()),
+ task_queue_factory_(CreateDefaultTaskQueueFactory()),
+ rtc_event_log_factory_(task_queue_factory_.get()),
+ video_decoder_factory_([this](const SdpVideoFormat& format) {
+ return this->CreateVideoDecoder(format);
+ }),
+ video_encoder_factory_([this](const SdpVideoFormat& format) {
+ return this->CreateVideoEncoder(format, nullptr);
+ }),
+ video_encoder_factory_with_analyzer_(
+ [this](const SdpVideoFormat& format) {
+ return this->CreateVideoEncoder(format, analyzer_.get());
+ }),
+ video_bitrate_allocator_factory_(
+ CreateBuiltinVideoBitrateAllocatorFactory()),
+ receive_logs_(0),
+ send_logs_(0),
+ injection_components_(std::move(injection_components)),
+ num_video_streams_(0) {
+ if (injection_components_ == nullptr) {
+ injection_components_ = std::make_unique<InjectionComponents>();
+ }
+ if (injection_components_->video_decoder_factory != nullptr) {
+ decoder_factory_ = std::move(injection_components_->video_decoder_factory);
+ } else {
+ decoder_factory_ = std::make_unique<InternalDecoderFactory>();
+ }
+ if (injection_components_->video_encoder_factory != nullptr) {
+ encoder_factory_ = std::move(injection_components_->video_encoder_factory);
+ } else {
+ encoder_factory_ = std::make_unique<InternalEncoderFactory>();
+ }
+
+ fec_controller_factory_ =
+ std::move(injection_components_->fec_controller_factory);
+ network_state_predictor_factory_ =
+ std::move(injection_components_->network_state_predictor_factory);
+ network_controller_factory_ =
+ std::move(injection_components_->network_controller_factory);
+
+  // Register header extensions that are used by the transport to identify
+  // extensions when parsing incoming packets.
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ RegisterRtpExtension(RtpExtension(RtpExtension::kGenericFrameDescriptorUri00,
+ kGenericFrameDescriptorExtensionId00));
+ RegisterRtpExtension(RtpExtension(RtpExtension::kDependencyDescriptorUri,
+ kRtpExtensionDependencyDescriptor));
+ RegisterRtpExtension(RtpExtension(RtpExtension::kVideoContentTypeUri,
+ kVideoContentTypeExtensionId));
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kVideoTimingUri, kVideoTimingExtensionId));
+}
+
+VideoQualityTest::InjectionComponents::InjectionComponents() = default;
+
+VideoQualityTest::InjectionComponents::~InjectionComponents() = default;
+
+void VideoQualityTest::TestBody() {}
+
+std::string VideoQualityTest::GenerateGraphTitle() const {
+ rtc::StringBuilder ss;
+ ss << params_.video[0].codec;
+ ss << " (" << params_.video[0].target_bitrate_bps / 1000 << "kbps";
+ ss << ", " << params_.video[0].fps << " FPS";
+ if (params_.screenshare[0].scroll_duration)
+ ss << ", " << params_.screenshare[0].scroll_duration << "s scroll";
+ if (params_.ss[0].streams.size() > 1)
+ ss << ", Stream #" << params_.ss[0].selected_stream;
+ if (params_.ss[0].num_spatial_layers > 1)
+ ss << ", Layer #" << params_.ss[0].selected_sl;
+ ss << ")";
+ return ss.Release();
+}
+
+void VideoQualityTest::CheckParamsAndInjectionComponents() {
+ if (injection_components_ == nullptr) {
+ injection_components_ = std::make_unique<InjectionComponents>();
+ }
+ if (!params_.config && injection_components_->sender_network == nullptr &&
+ injection_components_->receiver_network == nullptr) {
+ params_.config = BuiltInNetworkBehaviorConfig();
+ }
+ RTC_CHECK(
+ (params_.config && injection_components_->sender_network == nullptr &&
+ injection_components_->receiver_network == nullptr) ||
+ (!params_.config && injection_components_->sender_network != nullptr &&
+ injection_components_->receiver_network != nullptr));
+ for (size_t video_idx = 0; video_idx < num_video_streams_; ++video_idx) {
+ // Iterate over primary and secondary video streams.
+ if (!params_.video[video_idx].enabled)
+ return;
+    // Add a default stream if none is specified.
+ if (params_.ss[video_idx].streams.empty())
+ params_.ss[video_idx].streams.push_back(
+ VideoQualityTest::DefaultVideoStream(params_, video_idx));
+ if (params_.ss[video_idx].num_spatial_layers == 0)
+ params_.ss[video_idx].num_spatial_layers = 1;
+
+ if (params_.config) {
+ if (params_.config->loss_percent != 0 ||
+ params_.config->queue_length_packets != 0) {
+        // Since LayerFilteringTransport changes the sequence numbers, we
+        // can't use that feature with packet loss: the NACK requests would
+        // end up retransmitting the wrong packets.
+ RTC_CHECK(params_.ss[video_idx].selected_sl == -1 ||
+ params_.ss[video_idx].selected_sl ==
+ params_.ss[video_idx].num_spatial_layers - 1);
+ RTC_CHECK(params_.video[video_idx].selected_tl == -1 ||
+ params_.video[video_idx].selected_tl ==
+ params_.video[video_idx].num_temporal_layers - 1);
+ }
+ }
+
+ // TODO(ivica): Should max_bitrate_bps == -1 represent inf max bitrate, as
+ // it does in some parts of the code?
+ RTC_CHECK_GE(params_.video[video_idx].max_bitrate_bps,
+ params_.video[video_idx].target_bitrate_bps);
+ RTC_CHECK_GE(params_.video[video_idx].target_bitrate_bps,
+ params_.video[video_idx].min_bitrate_bps);
+ int selected_stream = params_.ss[video_idx].selected_stream;
+ if (params_.video[video_idx].selected_tl > -1) {
+ RTC_CHECK_LT(selected_stream, params_.ss[video_idx].streams.size())
+ << "Can not use --selected_tl when --selected_stream is all streams";
+ int stream_tl = params_.ss[video_idx]
+ .streams[selected_stream]
+ .num_temporal_layers.value_or(1);
+ RTC_CHECK_LT(params_.video[video_idx].selected_tl, stream_tl);
+ }
+ RTC_CHECK_LE(params_.ss[video_idx].selected_stream,
+ params_.ss[video_idx].streams.size());
+ for (const VideoStream& stream : params_.ss[video_idx].streams) {
+ RTC_CHECK_GE(stream.min_bitrate_bps, 0);
+ RTC_CHECK_GE(stream.target_bitrate_bps, stream.min_bitrate_bps);
+ RTC_CHECK_GE(stream.max_bitrate_bps, stream.target_bitrate_bps);
+ }
+    // TODO(ivica): Should we check if the sum of all streams/layers is equal
+    // to the total bitrate? We have to update them anyway in case the bitrate
+    // estimator changes the total bitrate.
+ RTC_CHECK_GE(params_.ss[video_idx].num_spatial_layers, 1);
+ RTC_CHECK_LE(params_.ss[video_idx].selected_sl,
+ params_.ss[video_idx].num_spatial_layers);
+ RTC_CHECK(
+ params_.ss[video_idx].spatial_layers.empty() ||
+ params_.ss[video_idx].spatial_layers.size() ==
+ static_cast<size_t>(params_.ss[video_idx].num_spatial_layers));
+ if (params_.video[video_idx].codec == "VP8") {
+ RTC_CHECK_EQ(params_.ss[video_idx].num_spatial_layers, 1);
+ } else if (params_.video[video_idx].codec == "VP9") {
+ RTC_CHECK_EQ(params_.ss[video_idx].streams.size(), 1);
+ }
+ RTC_CHECK_GE(params_.call.num_thumbnails, 0);
+ if (params_.call.num_thumbnails > 0) {
+ RTC_CHECK_EQ(params_.ss[video_idx].num_spatial_layers, 1);
+ RTC_CHECK_EQ(params_.ss[video_idx].streams.size(), 3);
+ RTC_CHECK_EQ(params_.video[video_idx].num_temporal_layers, 3);
+ RTC_CHECK_EQ(params_.video[video_idx].codec, "VP8");
+ }
+ // Dual streams with FEC not supported in tests yet.
+ RTC_CHECK(!params_.video[video_idx].flexfec || num_video_streams_ == 1);
+ RTC_CHECK(!params_.video[video_idx].ulpfec || num_video_streams_ == 1);
+ }
+}
+
+// Static.
+std::vector<int> VideoQualityTest::ParseCSV(const std::string& str) {
+  // Parse comma-separated integers, where some elements may be empty. Empty
+  // values are replaced with -1.
+  // E.g. "10,-20,,30,40" --> {10, -20, -1, 30, 40}
+ // E.g. ",,10,,20," --> {-1, -1, 10, -1, 20, -1}
+ std::vector<int> result;
+ if (str.empty())
+ return result;
+
+ const char* p = str.c_str();
+ int value = -1;
+ int pos;
+ while (*p) {
+ if (*p == ',') {
+ result.push_back(value);
+ value = -1;
+ ++p;
+ continue;
+ }
+ RTC_CHECK_EQ(sscanf(p, "%d%n", &value, &pos), 1)
+ << "Unexpected non-number value.";
+ p += pos;
+ }
+ result.push_back(value);
+ return result;
+}
+
+// Static.
+VideoStream VideoQualityTest::DefaultVideoStream(const Params& params,
+ size_t video_idx) {
+ VideoStream stream;
+ stream.width = params.video[video_idx].width;
+ stream.height = params.video[video_idx].height;
+ stream.max_framerate = params.video[video_idx].fps;
+ stream.min_bitrate_bps = params.video[video_idx].min_bitrate_bps;
+ stream.target_bitrate_bps = params.video[video_idx].target_bitrate_bps;
+ stream.max_bitrate_bps = params.video[video_idx].max_bitrate_bps;
+ stream.max_qp = kDefaultMaxQp;
+ stream.num_temporal_layers = params.video[video_idx].num_temporal_layers;
+ stream.active = true;
+ return stream;
+}
+
+// Static.
+VideoStream VideoQualityTest::DefaultThumbnailStream() {
+ VideoStream stream;
+ stream.width = 320;
+ stream.height = 180;
+ stream.max_framerate = 7;
+ stream.min_bitrate_bps = 7500;
+ stream.target_bitrate_bps = 37500;
+ stream.max_bitrate_bps = 50000;
+ stream.max_qp = kDefaultMaxQp;
+ return stream;
+}
+
+// Static.
+void VideoQualityTest::FillScalabilitySettings(
+ Params* params,
+ size_t video_idx,
+ const std::vector<std::string>& stream_descriptors,
+ int num_streams,
+ size_t selected_stream,
+ int num_spatial_layers,
+ int selected_sl,
+ InterLayerPredMode inter_layer_pred,
+ const std::vector<std::string>& sl_descriptors) {
+ if (params->ss[video_idx].streams.empty() &&
+ params->ss[video_idx].infer_streams) {
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ webrtc::VideoEncoderConfig encoder_config;
+ encoder_config.codec_type =
+ PayloadStringToCodecType(params->video[video_idx].codec);
+ encoder_config.content_type =
+ params->screenshare[video_idx].enabled
+ ? webrtc::VideoEncoderConfig::ContentType::kScreen
+ : webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo;
+ encoder_config.max_bitrate_bps = params->video[video_idx].max_bitrate_bps;
+ encoder_config.min_transmit_bitrate_bps =
+ params->video[video_idx].min_transmit_bps;
+ encoder_config.number_of_streams = num_streams;
+ encoder_config.spatial_layers = params->ss[video_idx].spatial_layers;
+ encoder_config.simulcast_layers = std::vector<VideoStream>(num_streams);
+ encoder_config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ params->video[video_idx].codec, kDefaultMaxQp,
+ params->screenshare[video_idx].enabled, true, encoder_info);
+ params->ss[video_idx].streams =
+ encoder_config.video_stream_factory->CreateEncoderStreams(
+ params->video[video_idx].width, params->video[video_idx].height,
+ encoder_config);
+ } else {
+    // Read VideoStream and SpatialLayer elements from a list of
+    // comma-separated lists. To use a default value for an element, use -1 or
+    // leave it empty. Validity checks are performed in
+    // CheckParamsAndInjectionComponents.
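+    // Field order per descriptor (matching the parsing below):
+    //   width,height,max_fps,min_bps,target_bps,max_bps[,max_qp[,num_tl]]
+    // e.g. "1280,720,30,800000,1200000,1500000" (illustrative values).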
+ RTC_CHECK(params->ss[video_idx].streams.empty());
+ for (const auto& descriptor : stream_descriptors) {
+ if (descriptor.empty())
+ continue;
+ VideoStream stream =
+ VideoQualityTest::DefaultVideoStream(*params, video_idx);
+ std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
+ if (v[0] != -1)
+ stream.width = static_cast<size_t>(v[0]);
+ if (v[1] != -1)
+ stream.height = static_cast<size_t>(v[1]);
+ if (v[2] != -1)
+ stream.max_framerate = v[2];
+ if (v[3] != -1)
+ stream.min_bitrate_bps = v[3];
+ if (v[4] != -1)
+ stream.target_bitrate_bps = v[4];
+ if (v[5] != -1)
+ stream.max_bitrate_bps = v[5];
+ if (v.size() > 6 && v[6] != -1)
+ stream.max_qp = v[6];
+ if (v.size() > 7 && v[7] != -1) {
+ stream.num_temporal_layers = v[7];
+ } else {
+        // Automatic TL thresholds are not supported for more than two layers.
+ RTC_CHECK_LE(params->video[video_idx].num_temporal_layers, 2);
+ }
+ params->ss[video_idx].streams.push_back(stream);
+ }
+ }
+
+ params->ss[video_idx].num_spatial_layers = std::max(1, num_spatial_layers);
+ params->ss[video_idx].selected_stream = selected_stream;
+
+ params->ss[video_idx].selected_sl = selected_sl;
+ params->ss[video_idx].inter_layer_pred = inter_layer_pred;
+ RTC_CHECK(params->ss[video_idx].spatial_layers.empty());
+ for (const auto& descriptor : sl_descriptors) {
+ if (descriptor.empty())
+ continue;
+ std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
+ RTC_CHECK_EQ(v.size(), 8);
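+    // Field order (matching the assignments below): width,height,max_fps,
+    // num_temporal_layers,max_bitrate,min_bitrate,target_bitrate,max_qp.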
+
+ SpatialLayer layer = {0};
+ layer.width = v[0];
+ layer.height = v[1];
+ layer.maxFramerate = v[2];
+ layer.numberOfTemporalLayers = v[3];
+ layer.maxBitrate = v[4];
+ layer.minBitrate = v[5];
+ layer.targetBitrate = v[6];
+ layer.qpMax = v[7];
+ layer.active = true;
+
+ params->ss[video_idx].spatial_layers.push_back(layer);
+ }
+}
+
+void VideoQualityTest::SetupVideo(Transport* send_transport,
+ Transport* recv_transport) {
+ size_t total_streams_used = 0;
+ video_receive_configs_.clear();
+ video_send_configs_.clear();
+ video_encoder_configs_.clear();
+ bool decode_all_receive_streams = true;
+ size_t num_video_substreams = params_.ss[0].streams.size();
+ RTC_CHECK(num_video_streams_ > 0);
+ video_encoder_configs_.resize(num_video_streams_);
+ std::string generic_codec_name;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ for (size_t video_idx = 0; video_idx < num_video_streams_; ++video_idx) {
+ VideoSendStream::Config config(send_transport);
+ config.rtp.extmap_allow_mixed = true;
+ video_send_configs_.push_back(std::move(config));
+ video_encoder_configs_.push_back(VideoEncoderConfig());
+ num_video_substreams = params_.ss[video_idx].streams.size();
+ RTC_CHECK_GT(num_video_substreams, 0);
+ for (size_t i = 0; i < num_video_substreams; ++i)
+ video_send_configs_[video_idx].rtp.ssrcs.push_back(
+ kVideoSendSsrcs[total_streams_used + i]);
+
+ int payload_type;
+ if (params_.video[video_idx].codec == "H264") {
+ payload_type = kPayloadTypeH264;
+ } else if (params_.video[video_idx].codec == "VP8") {
+ payload_type = kPayloadTypeVP8;
+ } else if (params_.video[video_idx].codec == "VP9") {
+ payload_type = kPayloadTypeVP9;
+ } else if (params_.video[video_idx].codec == "multiplex") {
+ payload_type = kPayloadTypeVP9;
+ } else if (params_.video[video_idx].codec == "FakeCodec") {
+ payload_type = kFakeVideoSendPayloadType;
+ } else {
+ RTC_CHECK(generic_codec_name.empty() ||
+ generic_codec_name == params_.video[video_idx].codec)
+ << "Supplying multiple generic codecs is unsupported.";
+ RTC_LOG(LS_INFO) << "Treating codec " << params_.video[video_idx].codec
+ << " as generic.";
+ payload_type = kPayloadTypeGeneric;
+ generic_codec_name = params_.video[video_idx].codec;
+ }
+ video_send_configs_[video_idx].encoder_settings.encoder_factory =
+ (video_idx == 0) ? &video_encoder_factory_with_analyzer_
+ : &video_encoder_factory_;
+ video_send_configs_[video_idx].encoder_settings.bitrate_allocator_factory =
+ video_bitrate_allocator_factory_.get();
+
+ video_send_configs_[video_idx].rtp.payload_name =
+ params_.video[video_idx].codec;
+ video_send_configs_[video_idx].rtp.payload_type = payload_type;
+ video_send_configs_[video_idx].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_send_configs_[video_idx].rtp.rtx.payload_type = kSendRtxPayloadType;
+ for (size_t i = 0; i < num_video_substreams; ++i) {
+ video_send_configs_[video_idx].rtp.rtx.ssrcs.push_back(
+ kSendRtxSsrcs[i + total_streams_used]);
+ }
+ video_send_configs_[video_idx].rtp.extensions.clear();
+ if (params_.call.send_side_bwe) {
+ video_send_configs_[video_idx].rtp.extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId);
+ } else {
+ video_send_configs_[video_idx].rtp.extensions.emplace_back(
+ RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId);
+ }
+
+ if (params_.call.generic_descriptor) {
+ video_send_configs_[video_idx].rtp.extensions.emplace_back(
+ RtpExtension::kGenericFrameDescriptorUri00,
+ kGenericFrameDescriptorExtensionId00);
+ }
+
+ if (params_.call.dependency_descriptor) {
+ video_send_configs_[video_idx].rtp.extensions.emplace_back(
+ RtpExtension::kDependencyDescriptorUri,
+ kRtpExtensionDependencyDescriptor);
+ }
+
+ video_send_configs_[video_idx].rtp.extensions.emplace_back(
+ RtpExtension::kVideoContentTypeUri, kVideoContentTypeExtensionId);
+ video_send_configs_[video_idx].rtp.extensions.emplace_back(
+ RtpExtension::kVideoTimingUri, kVideoTimingExtensionId);
+
+ video_encoder_configs_[video_idx].video_format.name =
+ params_.video[video_idx].codec;
+
+ video_encoder_configs_[video_idx].video_format.parameters =
+ params_.video[video_idx].sdp_params;
+
+ video_encoder_configs_[video_idx].codec_type =
+ PayloadStringToCodecType(params_.video[video_idx].codec);
+
+ video_encoder_configs_[video_idx].min_transmit_bitrate_bps =
+ params_.video[video_idx].min_transmit_bps;
+
+ video_send_configs_[video_idx].suspend_below_min_bitrate =
+ params_.video[video_idx].suspend_below_min_bitrate;
+
+ video_encoder_configs_[video_idx].number_of_streams =
+ params_.ss[video_idx].streams.size();
+ video_encoder_configs_[video_idx].max_bitrate_bps = 0;
+ for (size_t i = 0; i < params_.ss[video_idx].streams.size(); ++i) {
+ video_encoder_configs_[video_idx].max_bitrate_bps +=
+ params_.ss[video_idx].streams[i].max_bitrate_bps;
+ }
+ video_encoder_configs_[video_idx].simulcast_layers =
+ std::vector<VideoStream>(params_.ss[video_idx].streams.size());
+ if (!params_.ss[video_idx].infer_streams) {
+ video_encoder_configs_[video_idx].simulcast_layers =
+ params_.ss[video_idx].streams;
+ }
+ video_encoder_configs_[video_idx].video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ params_.video[video_idx].codec,
+ params_.ss[video_idx].streams[0].max_qp,
+ params_.screenshare[video_idx].enabled, true, encoder_info);
+
+ video_encoder_configs_[video_idx].spatial_layers =
+ params_.ss[video_idx].spatial_layers;
+
+ video_encoder_configs_[video_idx].frame_drop_enabled = true;
+
+ decode_all_receive_streams = params_.ss[video_idx].selected_stream ==
+ params_.ss[video_idx].streams.size();
+ absl::optional<int> decode_sub_stream;
+ if (!decode_all_receive_streams)
+ decode_sub_stream = params_.ss[video_idx].selected_stream;
+ CreateMatchingVideoReceiveConfigs(
+ video_send_configs_[video_idx], recv_transport, &video_decoder_factory_,
+ decode_sub_stream, true, kNackRtpHistoryMs);
+
+ if (params_.screenshare[video_idx].enabled) {
+ // Fill out codec settings.
+ video_encoder_configs_[video_idx].content_type =
+ VideoEncoderConfig::ContentType::kScreen;
+ degradation_preference_ = DegradationPreference::MAINTAIN_RESOLUTION;
+ if (params_.video[video_idx].codec == "VP8") {
+ VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
+ vp8_settings.denoisingOn = false;
+ vp8_settings.numberOfTemporalLayers = static_cast<unsigned char>(
+ params_.video[video_idx].num_temporal_layers);
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<
+ VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
+ } else if (params_.video[video_idx].codec == "VP9") {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.denoisingOn = false;
+ vp9_settings.automaticResizeOn = false;
+ vp9_settings.numberOfTemporalLayers = static_cast<unsigned char>(
+ params_.video[video_idx].num_temporal_layers);
+ vp9_settings.numberOfSpatialLayers = static_cast<unsigned char>(
+ params_.ss[video_idx].num_spatial_layers);
+ vp9_settings.interLayerPred = params_.ss[video_idx].inter_layer_pred;
+        // High-FPS VP9 screenshare requires flexible mode.
+ if (params_.ss[video_idx].num_spatial_layers > 1) {
+ vp9_settings.flexibleMode = true;
+ }
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ }
+ } else if (params_.ss[video_idx].num_spatial_layers > 1) {
+      // In SVC mode without screenshare, codec-specific settings still need
+      // to be set.
+ RTC_CHECK(params_.video[video_idx].codec == "VP9");
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfTemporalLayers = static_cast<unsigned char>(
+ params_.video[video_idx].num_temporal_layers);
+ vp9_settings.numberOfSpatialLayers =
+ static_cast<unsigned char>(params_.ss[video_idx].num_spatial_layers);
+ vp9_settings.interLayerPred = params_.ss[video_idx].inter_layer_pred;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ RTC_DCHECK_EQ(video_encoder_configs_[video_idx].simulcast_layers.size(),
+ 1);
+ // Min bitrate will be enforced by spatial layer config instead.
+ video_encoder_configs_[video_idx].simulcast_layers[0].min_bitrate_bps = 0;
+ } else if (params_.video[video_idx].automatic_scaling) {
+ if (params_.video[video_idx].codec == "VP8") {
+ VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
+ vp8_settings.automaticResizeOn = true;
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<
+ VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
+ } else if (params_.video[video_idx].codec == "VP9") {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ // Only enable quality scaler for single spatial layer.
+ vp9_settings.automaticResizeOn =
+ params_.ss[video_idx].num_spatial_layers == 1;
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ } else if (params_.video[video_idx].codec == "H264") {
+ // Quality scaling is always on for H.264.
+ } else if (params_.video[video_idx].codec == cricket::kAv1CodecName) {
+ // TODO(bugs.webrtc.org/11404): Propagate the flag to
+ // aom_codec_enc_cfg_t::rc_resize_mode in Av1 encoder wrapper.
+        // Until then, do nothing; in particular, do not crash.
+ } else {
+ RTC_DCHECK_NOTREACHED()
+ << "Automatic scaling not supported for codec "
+ << params_.video[video_idx].codec << ", stream " << video_idx;
+ }
+ } else {
+      // Default mode: single spatial layer, no automatic_scaling.
+ if (params_.video[video_idx].codec == "VP8") {
+ VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
+ vp8_settings.automaticResizeOn = false;
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<
+ VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
+ } else if (params_.video[video_idx].codec == "VP9") {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_configs_[video_idx].encoder_specific_settings =
+ rtc::make_ref_counted<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ } else if (params_.video[video_idx].codec == "H264") {
+ video_encoder_configs_[video_idx].encoder_specific_settings = nullptr;
+ }
+ }
+ total_streams_used += num_video_substreams;
+ }
+
+  // FEC is currently supported only in single-video-stream mode.
+ if (params_.video[0].flexfec) {
+ if (decode_all_receive_streams) {
+ SetSendFecConfig(GetVideoSendConfig()->rtp.ssrcs);
+ } else {
+ SetSendFecConfig({kVideoSendSsrcs[params_.ss[0].selected_stream]});
+ }
+
+ CreateMatchingFecConfig(recv_transport, *GetVideoSendConfig());
+ if (params_.call.send_side_bwe) {
+ GetFlexFecConfig()->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ } else {
+ GetFlexFecConfig()->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ }
+ }
+
+ if (params_.video[0].ulpfec) {
+ SetSendUlpFecConfig(GetVideoSendConfig());
+ if (decode_all_receive_streams) {
+ for (auto& receive_config : video_receive_configs_) {
+ SetReceiveUlpFecConfig(&receive_config);
+ }
+ } else {
+ SetReceiveUlpFecConfig(
+ &video_receive_configs_[params_.ss[0].selected_stream]);
+ }
+ }
+}
+
+void VideoQualityTest::SetupThumbnails(Transport* send_transport,
+ Transport* recv_transport) {
+ for (int i = 0; i < params_.call.num_thumbnails; ++i) {
+    // Thumbnails are sent in the opposite direction: from receiver_call to
+    // sender_call.
+ VideoSendStream::Config thumbnail_send_config(recv_transport);
+ thumbnail_send_config.rtp.ssrcs.push_back(kThumbnailSendSsrcStart + i);
+ thumbnail_send_config.encoder_settings.encoder_factory =
+ &video_encoder_factory_;
+ thumbnail_send_config.encoder_settings.bitrate_allocator_factory =
+ video_bitrate_allocator_factory_.get();
+ thumbnail_send_config.rtp.payload_name = params_.video[0].codec;
+ thumbnail_send_config.rtp.payload_type = kPayloadTypeVP8;
+ thumbnail_send_config.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ thumbnail_send_config.rtp.rtx.payload_type = kSendRtxPayloadType;
+ thumbnail_send_config.rtp.rtx.ssrcs.push_back(kThumbnailRtxSsrcStart + i);
+ thumbnail_send_config.rtp.extensions.clear();
+ if (params_.call.send_side_bwe) {
+ thumbnail_send_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ } else {
+ thumbnail_send_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ }
+
+ VideoEncoderConfig thumbnail_encoder_config;
+ thumbnail_encoder_config.codec_type = kVideoCodecVP8;
+ thumbnail_encoder_config.video_format.name = "VP8";
+ thumbnail_encoder_config.min_transmit_bitrate_bps = 7500;
+ thumbnail_send_config.suspend_below_min_bitrate =
+ params_.video[0].suspend_below_min_bitrate;
+ thumbnail_encoder_config.number_of_streams = 1;
+ thumbnail_encoder_config.max_bitrate_bps = 50000;
+ std::vector<VideoStream> streams{params_.ss[0].streams[0]};
+ thumbnail_encoder_config.video_stream_factory =
+ rtc::make_ref_counted<VideoStreamFactory>(streams);
+ thumbnail_encoder_config.spatial_layers = params_.ss[0].spatial_layers;
+
+ thumbnail_encoder_configs_.push_back(thumbnail_encoder_config.Copy());
+ thumbnail_send_configs_.push_back(thumbnail_send_config.Copy());
+
+ AddMatchingVideoReceiveConfigs(
+ &thumbnail_receive_configs_, thumbnail_send_config, send_transport,
+ &video_decoder_factory_, absl::nullopt, false, kNackRtpHistoryMs);
+ }
+ for (size_t i = 0; i < thumbnail_send_configs_.size(); ++i) {
+ thumbnail_send_streams_.push_back(receiver_call_->CreateVideoSendStream(
+ thumbnail_send_configs_[i].Copy(),
+ thumbnail_encoder_configs_[i].Copy()));
+ }
+ for (size_t i = 0; i < thumbnail_receive_configs_.size(); ++i) {
+ thumbnail_receive_streams_.push_back(sender_call_->CreateVideoReceiveStream(
+ thumbnail_receive_configs_[i].Copy()));
+ }
+}
+
+void VideoQualityTest::DestroyThumbnailStreams() {
+ for (VideoSendStream* thumbnail_send_stream : thumbnail_send_streams_) {
+ receiver_call_->DestroyVideoSendStream(thumbnail_send_stream);
+ }
+ thumbnail_send_streams_.clear();
+ for (VideoReceiveStreamInterface* thumbnail_receive_stream :
+ thumbnail_receive_streams_) {
+ sender_call_->DestroyVideoReceiveStream(thumbnail_receive_stream);
+ }
+ thumbnail_receive_streams_.clear();
+ for (std::unique_ptr<rtc::VideoSourceInterface<VideoFrame>>& video_capturer :
+ thumbnail_capturers_) {
+ video_capturer.reset();
+ }
+}
+
+void VideoQualityTest::SetupThumbnailCapturers(size_t num_thumbnail_streams) {
+ VideoStream thumbnail = DefaultThumbnailStream();
+ for (size_t i = 0; i < num_thumbnail_streams; ++i) {
+ auto frame_generator_capturer =
+ std::make_unique<test::FrameGeneratorCapturer>(
+ clock_,
+ test::CreateSquareFrameGenerator(static_cast<int>(thumbnail.width),
+ static_cast<int>(thumbnail.height),
+ absl::nullopt, absl::nullopt),
+ thumbnail.max_framerate, *task_queue_factory_);
+ EXPECT_TRUE(frame_generator_capturer->Init());
+ thumbnail_capturers_.push_back(std::move(frame_generator_capturer));
+ }
+}
+
+std::unique_ptr<test::FrameGeneratorInterface>
+VideoQualityTest::CreateFrameGenerator(size_t video_idx) {
+  // Set up the frame generator.
+ const size_t kWidth = 1850;
+ const size_t kHeight = 1110;
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator;
+ if (params_.screenshare[video_idx].generate_slides) {
+ frame_generator = test::CreateSlideFrameGenerator(
+ kWidth, kHeight,
+ params_.screenshare[video_idx].slide_change_interval *
+ params_.video[video_idx].fps);
+ } else {
+ std::vector<std::string> slides = params_.screenshare[video_idx].slides;
+ if (slides.empty()) {
+ slides.push_back(test::ResourcePath("web_screenshot_1850_1110", "yuv"));
+ slides.push_back(test::ResourcePath("presentation_1850_1110", "yuv"));
+ slides.push_back(test::ResourcePath("photo_1850_1110", "yuv"));
+ slides.push_back(test::ResourcePath("difficult_photo_1850_1110", "yuv"));
+ }
+ if (params_.screenshare[video_idx].scroll_duration == 0) {
+ // Cycle image every slide_change_interval seconds.
+ frame_generator = test::CreateFromYuvFileFrameGenerator(
+ slides, kWidth, kHeight,
+ params_.screenshare[video_idx].slide_change_interval *
+ params_.video[video_idx].fps);
+ } else {
+ RTC_CHECK_LE(params_.video[video_idx].width, kWidth);
+ RTC_CHECK_LE(params_.video[video_idx].height, kHeight);
+ RTC_CHECK_GT(params_.screenshare[video_idx].slide_change_interval, 0);
+ const int kPauseDurationMs =
+ (params_.screenshare[video_idx].slide_change_interval -
+ params_.screenshare[video_idx].scroll_duration) *
+ 1000;
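+ // Worked example: a 10 s slide_change_interval with a 2 s scroll_duration
+ // scrolls for 2000 ms and then pauses for
+ // kPauseDurationMs = (10 - 2) * 1000 = 8000 ms on each slide.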
+ RTC_CHECK_LE(params_.screenshare[video_idx].scroll_duration,
+ params_.screenshare[video_idx].slide_change_interval);
+
+ frame_generator = test::CreateScrollingInputFromYuvFilesFrameGenerator(
+ clock_, slides, kWidth, kHeight, params_.video[video_idx].width,
+ params_.video[video_idx].height,
+ params_.screenshare[video_idx].scroll_duration * 1000,
+ kPauseDurationMs);
+ }
+ }
+ return frame_generator;
+}
+
+void VideoQualityTest::CreateCapturers() {
+ RTC_DCHECK(video_sources_.empty());
+ video_sources_.resize(num_video_streams_);
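+ // The per-stream clip_path selects the source below: the "Generator*"
+ // values pick synthetic square-pattern generators (I420 by default, or the
+ // I420A / I010 / NV12 variants), an empty path attempts a real capture
+ // device (falling back to a generator on failure), and any other value is
+ // read as a YUV file.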
+ for (size_t video_idx = 0; video_idx < num_video_streams_; ++video_idx) {
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator;
+ if (params_.screenshare[video_idx].enabled) {
+ frame_generator = CreateFrameGenerator(video_idx);
+ } else if (params_.video[video_idx].clip_path == "Generator") {
+ frame_generator = test::CreateSquareFrameGenerator(
+ static_cast<int>(params_.video[video_idx].width),
+ static_cast<int>(params_.video[video_idx].height), absl::nullopt,
+ absl::nullopt);
+ } else if (params_.video[video_idx].clip_path == "GeneratorI420A") {
+ frame_generator = test::CreateSquareFrameGenerator(
+ static_cast<int>(params_.video[video_idx].width),
+ static_cast<int>(params_.video[video_idx].height),
+ test::FrameGeneratorInterface::OutputType::kI420A, absl::nullopt);
+ } else if (params_.video[video_idx].clip_path == "GeneratorI010") {
+ frame_generator = test::CreateSquareFrameGenerator(
+ static_cast<int>(params_.video[video_idx].width),
+ static_cast<int>(params_.video[video_idx].height),
+ test::FrameGeneratorInterface::OutputType::kI010, absl::nullopt);
+ } else if (params_.video[video_idx].clip_path == "GeneratorNV12") {
+ frame_generator = test::CreateSquareFrameGenerator(
+ static_cast<int>(params_.video[video_idx].width),
+ static_cast<int>(params_.video[video_idx].height),
+ test::FrameGeneratorInterface::OutputType::kNV12, absl::nullopt);
+ } else if (params_.video[video_idx].clip_path.empty()) {
+ video_sources_[video_idx] = test::CreateVideoCapturer(
+ params_.video[video_idx].width, params_.video[video_idx].height,
+ params_.video[video_idx].fps,
+ params_.video[video_idx].capture_device_index);
+ if (video_sources_[video_idx]) {
+ continue;
+ } else {
+ // Failed to get an actual camera; use the chroma generator as a backup.
+ frame_generator = test::CreateSquareFrameGenerator(
+ static_cast<int>(params_.video[video_idx].width),
+ static_cast<int>(params_.video[video_idx].height), absl::nullopt,
+ absl::nullopt);
+ }
+ } else {
+ frame_generator = test::CreateFromYuvFileFrameGenerator(
+ {params_.video[video_idx].clip_path}, params_.video[video_idx].width,
+ params_.video[video_idx].height, 1);
+ ASSERT_TRUE(frame_generator) << "Could not create capturer for "
+ << params_.video[video_idx].clip_path
+ << ".yuv. Is this file present?";
+ }
+ ASSERT_TRUE(frame_generator);
+ auto frame_generator_capturer =
+ std::make_unique<test::FrameGeneratorCapturer>(
+ clock_, std::move(frame_generator), params_.video[video_idx].fps,
+ *task_queue_factory_);
+ EXPECT_TRUE(frame_generator_capturer->Init());
+ video_sources_[video_idx] = std::move(frame_generator_capturer);
+ }
+}
+
+void VideoQualityTest::StartAudioStreams() {
+ audio_send_stream_->Start();
+ for (AudioReceiveStreamInterface* audio_recv_stream : audio_receive_streams_)
+ audio_recv_stream->Start();
+}
+
+void VideoQualityTest::StartThumbnails() {
+ for (VideoSendStream* send_stream : thumbnail_send_streams_)
+ send_stream->Start();
+ for (VideoReceiveStreamInterface* receive_stream : thumbnail_receive_streams_)
+ receive_stream->Start();
+}
+
+void VideoQualityTest::StopThumbnails() {
+ for (VideoReceiveStreamInterface* receive_stream : thumbnail_receive_streams_)
+ receive_stream->Stop();
+ for (VideoSendStream* send_stream : thumbnail_send_streams_)
+ send_stream->Stop();
+}
+
+std::unique_ptr<test::LayerFilteringTransport>
+VideoQualityTest::CreateSendTransport() {
+ std::unique_ptr<NetworkBehaviorInterface> network_behavior = nullptr;
+ if (injection_components_->sender_network == nullptr) {
+ network_behavior = std::make_unique<SimulatedNetwork>(*params_.config);
+ } else {
+ network_behavior = std::move(injection_components_->sender_network);
+ }
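+ // The LayerFilteringTransport forwards packets through the simulated
+ // network while filtering out VP8/VP9 layers above the selected temporal
+ // and spatial layers, so the receiving side sees the stream that a
+ // subscriber to those layers would get.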
+ return std::make_unique<test::LayerFilteringTransport>(
+ task_queue(),
+ std::make_unique<FakeNetworkPipe>(clock_, std::move(network_behavior)),
+ sender_call_.get(), kPayloadTypeVP8, kPayloadTypeVP9,
+ params_.video[0].selected_tl, params_.ss[0].selected_sl,
+ payload_type_map_, kVideoSendSsrcs[0],
+ static_cast<uint32_t>(kVideoSendSsrcs[0] + params_.ss[0].streams.size() -
+ 1),
+ GetRegisteredExtensions(), GetRegisteredExtensions());
+}
+
+std::unique_ptr<test::DirectTransport>
+VideoQualityTest::CreateReceiveTransport() {
+ std::unique_ptr<NetworkBehaviorInterface> network_behavior = nullptr;
+ if (injection_components_->receiver_network == nullptr) {
+ network_behavior = std::make_unique<SimulatedNetwork>(*params_.config);
+ } else {
+ network_behavior = std::move(injection_components_->receiver_network);
+ }
+ return std::make_unique<test::DirectTransport>(
+ task_queue(),
+ std::make_unique<FakeNetworkPipe>(clock_, std::move(network_behavior)),
+ receiver_call_.get(), payload_type_map_, GetRegisteredExtensions(),
+ GetRegisteredExtensions());
+}
+
+void VideoQualityTest::RunWithAnalyzer(const Params& params) {
+ num_video_streams_ = params.call.dual_video ? 2 : 1;
+ std::unique_ptr<test::LayerFilteringTransport> send_transport;
+ std::unique_ptr<test::DirectTransport> recv_transport;
+ FILE* graph_data_output_file = nullptr;
+
+ params_ = params;
+ // TODO(ivica): Merge with RunWithRenderers and use a flag / argument to
+ // differentiate between the analyzer and the renderer case.
+ CheckParamsAndInjectionComponents();
+
+ if (!params_.analyzer.graph_data_output_filename.empty()) {
+ graph_data_output_file =
+ fopen(params_.analyzer.graph_data_output_filename.c_str(), "w");
+ RTC_CHECK(graph_data_output_file)
+ << "Can't open the file " << params_.analyzer.graph_data_output_filename
+ << "!";
+ }
+
+ if (!params.logging.rtc_event_log_name.empty()) {
+ send_event_log_ = rtc_event_log_factory_.CreateRtcEventLog(
+ RtcEventLog::EncodingType::NewFormat);
+ recv_event_log_ = rtc_event_log_factory_.CreateRtcEventLog(
+ RtcEventLog::EncodingType::NewFormat);
+ std::unique_ptr<RtcEventLogOutputFile> send_output(
+ std::make_unique<RtcEventLogOutputFile>(
+ params.logging.rtc_event_log_name + "_send",
+ RtcEventLog::kUnlimitedOutput));
+ std::unique_ptr<RtcEventLogOutputFile> recv_output(
+ std::make_unique<RtcEventLogOutputFile>(
+ params.logging.rtc_event_log_name + "_recv",
+ RtcEventLog::kUnlimitedOutput));
+ bool event_log_started =
+ send_event_log_->StartLogging(std::move(send_output),
+ RtcEventLog::kImmediateOutput) &&
+ recv_event_log_->StartLogging(std::move(recv_output),
+ RtcEventLog::kImmediateOutput);
+ RTC_DCHECK(event_log_started);
+ } else {
+ send_event_log_ = std::make_unique<RtcEventLogNull>();
+ recv_event_log_ = std::make_unique<RtcEventLogNull>();
+ }
+
+ SendTask(task_queue(), [this, &params, &send_transport, &recv_transport]() {
+ Call::Config send_call_config(send_event_log_.get());
+ Call::Config recv_call_config(recv_event_log_.get());
+ send_call_config.bitrate_config = params.call.call_bitrate_config;
+ recv_call_config.bitrate_config = params.call.call_bitrate_config;
+ if (params_.audio.enabled)
+ InitializeAudioDevice(&send_call_config, &recv_call_config,
+ params_.audio.use_real_adm);
+
+ CreateCalls(send_call_config, recv_call_config);
+ send_transport = CreateSendTransport();
+ recv_transport = CreateReceiveTransport();
+ });
+
+ std::string graph_title = params_.analyzer.graph_title;
+ if (graph_title.empty())
+ graph_title = VideoQualityTest::GenerateGraphTitle();
+ bool is_quick_test_enabled = field_trial::IsEnabled("WebRTC-QuickPerfTest");
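+ // When the WebRTC-QuickPerfTest field trial is enabled, the analyzer only
+ // waits for kFramesSentInQuickTest frames and a near-zero duration instead
+ // of the configured test length.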
+ analyzer_ = std::make_unique<VideoAnalyzer>(
+ send_transport.get(), params_.analyzer.test_label,
+ params_.analyzer.avg_psnr_threshold, params_.analyzer.avg_ssim_threshold,
+ is_quick_test_enabled
+ ? kFramesSentInQuickTest
+ : params_.analyzer.test_durations_secs * params_.video[0].fps,
+ is_quick_test_enabled
+ ? TimeDelta::Millis(1)
+ : TimeDelta::Seconds(params_.analyzer.test_durations_secs),
+ graph_data_output_file, graph_title,
+ kVideoSendSsrcs[params_.ss[0].selected_stream],
+ kSendRtxSsrcs[params_.ss[0].selected_stream],
+ static_cast<size_t>(params_.ss[0].selected_stream),
+ params.ss[0].selected_sl, params_.video[0].selected_tl,
+ is_quick_test_enabled, clock_, params_.logging.rtp_dump_name,
+ task_queue());
+
+ SendTask(task_queue(), [&]() {
+ analyzer_->SetCall(sender_call_.get());
+ analyzer_->SetReceiver(receiver_call_->Receiver());
+ send_transport->SetReceiver(analyzer_.get());
+ recv_transport->SetReceiver(sender_call_->Receiver());
+
+ SetupVideo(analyzer_.get(), recv_transport.get());
+ SetupThumbnails(analyzer_.get(), recv_transport.get());
+ video_receive_configs_[params_.ss[0].selected_stream].renderer =
+ analyzer_.get();
+
+ CreateFlexfecStreams();
+ CreateVideoStreams();
+ analyzer_->SetSendStream(video_send_streams_[0]);
+ analyzer_->SetReceiveStream(
+ video_receive_streams_[params_.ss[0].selected_stream]);
+
+ GetVideoSendStream()->SetSource(analyzer_->OutputInterface(),
+ degradation_preference_);
+ SetupThumbnailCapturers(params_.call.num_thumbnails);
+ for (size_t i = 0; i < thumbnail_send_streams_.size(); ++i) {
+ thumbnail_send_streams_[i]->SetSource(thumbnail_capturers_[i].get(),
+ degradation_preference_);
+ }
+
+ CreateCapturers();
+
+ analyzer_->SetSource(video_sources_[0].get(), true);
+
+ for (size_t video_idx = 1; video_idx < num_video_streams_; ++video_idx) {
+ video_send_streams_[video_idx]->SetSource(video_sources_[video_idx].get(),
+ degradation_preference_);
+ }
+
+ if (params_.audio.enabled) {
+ SetupAudio(send_transport.get());
+ StartAudioStreams();
+ analyzer_->SetAudioReceiveStream(audio_receive_streams_[0]);
+ }
+ StartVideoStreams();
+ StartThumbnails();
+ analyzer_->StartMeasuringCpuProcessTime();
+ });
+
+ analyzer_->Wait();
+
+ SendTask(task_queue(), [&]() {
+ StopThumbnails();
+ Stop();
+
+ DestroyStreams();
+ DestroyThumbnailStreams();
+
+ if (graph_data_output_file)
+ fclose(graph_data_output_file);
+
+ send_transport.reset();
+ recv_transport.reset();
+
+ DestroyCalls();
+ });
+ analyzer_ = nullptr;
+}
+
+rtc::scoped_refptr<AudioDeviceModule> VideoQualityTest::CreateAudioDevice() {
+#ifdef WEBRTC_WIN
+ RTC_LOG(LS_INFO) << "Using latest version of ADM on Windows";
+ // We must initialize the COM library on a thread before we call any of
+ // the library functions. All COM functions in the ADM will return
+ // CO_E_NOTINITIALIZED otherwise. The legacy ADM for Windows used internal
+ // COM initialization, but the new ADM requires COM to be initialized
+ // externally.
+ com_initializer_ =
+ std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+ RTC_CHECK(com_initializer_->Succeeded());
+ RTC_CHECK(webrtc_win::core_audio_utility::IsSupported());
+ RTC_CHECK(webrtc_win::core_audio_utility::IsMMCSSSupported());
+ return CreateWindowsCoreAudioAudioDeviceModule(task_queue_factory_.get());
+#else
+ // Use legacy factory method on all platforms except Windows.
+ return AudioDeviceModule::Create(AudioDeviceModule::kPlatformDefaultAudio,
+ task_queue_factory_.get());
+#endif
+}
+
+void VideoQualityTest::InitializeAudioDevice(Call::Config* send_call_config,
+ Call::Config* recv_call_config,
+ bool use_real_adm) {
+ rtc::scoped_refptr<AudioDeviceModule> audio_device;
+ if (use_real_adm) {
+ // Run test with real ADM (using default audio devices) if user has
+ // explicitly set the --audio and --use_real_adm command-line flags.
+ audio_device = CreateAudioDevice();
+ } else {
+ // By default, create a test ADM which fakes audio.
+ audio_device = TestAudioDeviceModule::Create(
+ task_queue_factory_.get(),
+ TestAudioDeviceModule::CreatePulsedNoiseCapturer(32000, 48000),
+ TestAudioDeviceModule::CreateDiscardRenderer(48000), 1.f);
+ }
+ RTC_CHECK(audio_device);
+
+ AudioState::Config audio_state_config;
+ audio_state_config.audio_mixer = AudioMixerImpl::Create();
+ audio_state_config.audio_processing = AudioProcessingBuilder().Create();
+ audio_state_config.audio_device_module = audio_device;
+ send_call_config->audio_state = AudioState::Create(audio_state_config);
+ recv_call_config->audio_state = AudioState::Create(audio_state_config);
+ if (use_real_adm) {
+ // The real ADM requires extra initialization: setting default devices,
+ // setting up the number of channels, etc. The helper also calls
+ // AudioDeviceModule::Init().
+ webrtc::adm_helpers::Init(audio_device.get());
+ } else {
+ audio_device->Init();
+ }
+ // Always initialize the ADM before injecting a valid audio transport.
+ RTC_CHECK(audio_device->RegisterAudioCallback(
+ send_call_config->audio_state->audio_transport()) == 0);
+}
+
+void VideoQualityTest::SetupAudio(Transport* transport) {
+ AudioSendStream::Config audio_send_config(transport);
+ audio_send_config.rtp.ssrc = kAudioSendSsrc;
+
+ // Add an extension to enable audio send-side BWE and allow audio bitrate
+ // adaptation.
+ audio_send_config.rtp.extensions.clear();
+ audio_send_config.send_codec_spec = AudioSendStream::Config::SendCodecSpec(
+ kAudioSendPayloadType,
+ {"OPUS",
+ 48000,
+ 2,
+ {{"usedtx", (params_.audio.dtx ? "1" : "0")}, {"stereo", "1"}}});
+
+ if (params_.call.send_side_bwe) {
+ audio_send_config.rtp.extensions.push_back(
+ webrtc::RtpExtension(webrtc::RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ audio_send_config.min_bitrate_bps = kOpusMinBitrateBps;
+ audio_send_config.max_bitrate_bps = kOpusBitrateFbBps;
+ audio_send_config.send_codec_spec->transport_cc_enabled = true;
+ // Only allow ANA when send-side BWE is enabled.
+ audio_send_config.audio_network_adaptor_config = params_.audio.ana_config;
+ }
+ audio_send_config.encoder_factory = audio_encoder_factory_;
+ SetAudioConfig(audio_send_config);
+
+ std::string sync_group;
+ if (params_.video[0].enabled && params_.audio.sync_video)
+ sync_group = kSyncGroup;
+
+ CreateMatchingAudioConfigs(transport, sync_group);
+ CreateAudioStreams();
+}
+
+void VideoQualityTest::RunWithRenderers(const Params& params) {
+ RTC_LOG(LS_INFO) << __FUNCTION__;
+ num_video_streams_ = params.call.dual_video ? 2 : 1;
+ std::unique_ptr<test::LayerFilteringTransport> send_transport;
+ std::unique_ptr<test::DirectTransport> recv_transport;
+ std::unique_ptr<test::VideoRenderer> local_preview;
+ std::vector<std::unique_ptr<test::VideoRenderer>> loopback_renderers;
+
+ if (!params.logging.rtc_event_log_name.empty()) {
+ send_event_log_ = rtc_event_log_factory_.CreateRtcEventLog(
+ RtcEventLog::EncodingType::NewFormat);
+ recv_event_log_ = rtc_event_log_factory_.CreateRtcEventLog(
+ RtcEventLog::EncodingType::NewFormat);
+ std::unique_ptr<RtcEventLogOutputFile> send_output(
+ std::make_unique<RtcEventLogOutputFile>(
+ params.logging.rtc_event_log_name + "_send",
+ RtcEventLog::kUnlimitedOutput));
+ std::unique_ptr<RtcEventLogOutputFile> recv_output(
+ std::make_unique<RtcEventLogOutputFile>(
+ params.logging.rtc_event_log_name + "_recv",
+ RtcEventLog::kUnlimitedOutput));
+ bool event_log_started =
+ send_event_log_->StartLogging(std::move(send_output),
+ /*output_period_ms=*/5000) &&
+ recv_event_log_->StartLogging(std::move(recv_output),
+ /*output_period_ms=*/5000);
+ RTC_DCHECK(event_log_started);
+ } else {
+ send_event_log_ = std::make_unique<RtcEventLogNull>();
+ recv_event_log_ = std::make_unique<RtcEventLogNull>();
+ }
+
+ SendTask(task_queue(), [&]() {
+ params_ = params;
+ CheckParamsAndInjectionComponents();
+
+ // TODO(ivica): Remove bitrate_config and use the default Call::Config(), to
+ // match the full stack tests.
+ Call::Config send_call_config(send_event_log_.get());
+ send_call_config.bitrate_config = params_.call.call_bitrate_config;
+ Call::Config recv_call_config(recv_event_log_.get());
+
+ if (params_.audio.enabled)
+ InitializeAudioDevice(&send_call_config, &recv_call_config,
+ params_.audio.use_real_adm);
+
+ CreateCalls(send_call_config, recv_call_config);
+
+ // TODO(minyue): consider whether this is a good transport even for
+ // audio-only calls.
+ send_transport = CreateSendTransport();
+
+ recv_transport = CreateReceiveTransport();
+
+ // TODO(ivica): Use two calls to be able to merge with RunWithAnalyzer or at
+ // least share as much code as possible. That way this test would also match
+ // the full stack tests better.
+ send_transport->SetReceiver(receiver_call_->Receiver());
+ recv_transport->SetReceiver(sender_call_->Receiver());
+
+ if (params_.video[0].enabled) {
+ // Create video renderers.
+ SetupVideo(send_transport.get(), recv_transport.get());
+ size_t num_streams_processed = 0;
+ for (size_t video_idx = 0; video_idx < num_video_streams_; ++video_idx) {
+ const size_t selected_stream_id = params_.ss[video_idx].selected_stream;
+ const size_t num_streams = params_.ss[video_idx].streams.size();
+ if (selected_stream_id == num_streams) {
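+ // A selected stream index equal to the number of substreams is used to
+ // mean "render all substreams" rather than a single selected one.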
+ for (size_t stream_id = 0; stream_id < num_streams; ++stream_id) {
+ rtc::StringBuilder oss;
+ oss << "Loopback Video #" << video_idx << " - Stream #"
+ << static_cast<int>(stream_id);
+ loopback_renderers.emplace_back(test::VideoRenderer::Create(
+ oss.str().c_str(),
+ params_.ss[video_idx].streams[stream_id].width,
+ params_.ss[video_idx].streams[stream_id].height));
+ video_receive_configs_[stream_id + num_streams_processed].renderer =
+ loopback_renderers.back().get();
+ if (params_.audio.enabled && params_.audio.sync_video)
+ video_receive_configs_[stream_id + num_streams_processed]
+ .sync_group = kSyncGroup;
+ }
+ } else {
+ rtc::StringBuilder oss;
+ oss << "Loopback Video #" << video_idx;
+ loopback_renderers.emplace_back(test::VideoRenderer::Create(
+ oss.str().c_str(),
+ params_.ss[video_idx].streams[selected_stream_id].width,
+ params_.ss[video_idx].streams[selected_stream_id].height));
+ video_receive_configs_[selected_stream_id + num_streams_processed]
+ .renderer = loopback_renderers.back().get();
+ if (params_.audio.enabled && params_.audio.sync_video)
+ video_receive_configs_[num_streams_processed + selected_stream_id]
+ .sync_group = kSyncGroup;
+ }
+ num_streams_processed += num_streams;
+ }
+ CreateFlexfecStreams();
+ CreateVideoStreams();
+
+ CreateCapturers();
+ if (params_.video[0].enabled) {
+ // Create local preview
+ local_preview.reset(test::VideoRenderer::Create(
+ "Local Preview", params_.video[0].width, params_.video[0].height));
+
+ video_sources_[0]->AddOrUpdateSink(local_preview.get(),
+ rtc::VideoSinkWants());
+ }
+ ConnectVideoSourcesToStreams();
+ }
+
+ if (params_.audio.enabled) {
+ SetupAudio(send_transport.get());
+ }
+
+ Start();
+ });
+
+ PressEnterToContinue(task_queue());
+
+ SendTask(task_queue(), [&]() {
+ Stop();
+ DestroyStreams();
+
+ send_transport.reset();
+ recv_transport.reset();
+
+ local_preview.reset();
+ loopback_renderers.clear();
+
+ DestroyCalls();
+ });
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_quality_test.h b/third_party/libwebrtc/video/video_quality_test.h
new file mode 100644
index 0000000000..f66256e94c
--- /dev/null
+++ b/third_party/libwebrtc/video/video_quality_test.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_VIDEO_QUALITY_TEST_H_
+#define VIDEO_VIDEO_QUALITY_TEST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/fec_controller.h"
+#include "api/rtc_event_log/rtc_event_log_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/test/frame_generator_interface.h"
+#include "api/test/video_quality_test_fixture.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "call/fake_network_pipe.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "test/call_test.h"
+#include "test/layer_filtering_transport.h"
+#include "video/video_analyzer.h"
+#ifdef WEBRTC_WIN
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#endif
+
+namespace webrtc {
+
+class VideoQualityTest : public test::CallTest,
+ public VideoQualityTestFixtureInterface {
+ public:
+ explicit VideoQualityTest(
+ std::unique_ptr<InjectionComponents> injection_components);
+
+ void RunWithAnalyzer(const Params& params) override;
+ void RunWithRenderers(const Params& params) override;
+
+ const std::map<uint8_t, webrtc::MediaType>& payload_type_map() override {
+ return payload_type_map_;
+ }
+
+ static void FillScalabilitySettings(
+ Params* params,
+ size_t video_idx,
+ const std::vector<std::string>& stream_descriptors,
+ int num_streams,
+ size_t selected_stream,
+ int num_spatial_layers,
+ int selected_sl,
+ InterLayerPredMode inter_layer_pred,
+ const std::vector<std::string>& sl_descriptors);
+
+ // Helper static methods.
+ static VideoStream DefaultVideoStream(const Params& params, size_t video_idx);
+ static VideoStream DefaultThumbnailStream();
+ static std::vector<int> ParseCSV(const std::string& str);
+
+ protected:
+ // No-op implementation to be able to instantiate this class from non-TEST_F
+ // locations.
+ void TestBody() override;
+
+ // Helper methods accessing only params_.
+ std::string GenerateGraphTitle() const;
+ void CheckParamsAndInjectionComponents();
+
+ // Helper methods for setting up the call.
+ void CreateCapturers();
+ std::unique_ptr<test::FrameGeneratorInterface> CreateFrameGenerator(
+ size_t video_idx);
+ void SetupThumbnailCapturers(size_t num_thumbnail_streams);
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ const SdpVideoFormat& format);
+ std::unique_ptr<VideoEncoder> CreateVideoEncoder(const SdpVideoFormat& format,
+ VideoAnalyzer* analyzer);
+ void SetupVideo(Transport* send_transport, Transport* recv_transport);
+ void SetupThumbnails(Transport* send_transport, Transport* recv_transport);
+ void StartAudioStreams();
+ void StartThumbnails();
+ void StopThumbnails();
+ void DestroyThumbnailStreams();
+ // Helper method for creating a real ADM (using hardware) for all platforms.
+ rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice();
+ void InitializeAudioDevice(Call::Config* send_call_config,
+ Call::Config* recv_call_config,
+ bool use_real_adm);
+ void SetupAudio(Transport* transport);
+
+ void StartEncodedFrameLogs(VideoReceiveStreamInterface* stream);
+
+ virtual std::unique_ptr<test::LayerFilteringTransport> CreateSendTransport();
+ virtual std::unique_ptr<test::DirectTransport> CreateReceiveTransport();
+
+ std::vector<std::unique_ptr<rtc::VideoSourceInterface<VideoFrame>>>
+ thumbnail_capturers_;
+ Clock* const clock_;
+ const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ RtcEventLogFactory rtc_event_log_factory_;
+
+ test::FunctionVideoDecoderFactory video_decoder_factory_;
+ std::unique_ptr<VideoDecoderFactory> decoder_factory_;
+ test::FunctionVideoEncoderFactory video_encoder_factory_;
+ test::FunctionVideoEncoderFactory video_encoder_factory_with_analyzer_;
+ std::unique_ptr<VideoBitrateAllocatorFactory>
+ video_bitrate_allocator_factory_;
+ std::unique_ptr<VideoEncoderFactory> encoder_factory_;
+ std::vector<VideoSendStream::Config> thumbnail_send_configs_;
+ std::vector<VideoEncoderConfig> thumbnail_encoder_configs_;
+ std::vector<VideoSendStream*> thumbnail_send_streams_;
+ std::vector<VideoReceiveStreamInterface::Config> thumbnail_receive_configs_;
+ std::vector<VideoReceiveStreamInterface*> thumbnail_receive_streams_;
+
+ int receive_logs_;
+ int send_logs_;
+
+ Params params_;
+ std::unique_ptr<InjectionComponents> injection_components_;
+
+ // Set non-null when running with analyzer.
+ std::unique_ptr<VideoAnalyzer> analyzer_;
+
+ // Note: not the same as the similarly named member in CallTest. This is
+ // the number of separate send streams; the one in CallTest is the number
+ // of substreams for a single send stream.
+ size_t num_video_streams_;
+
+#ifdef WEBRTC_WIN
+ // Windows Core Audio based ADM needs to run on a COM initialized thread.
+ // Only referenced in combination with --audio --use_real_adm flags.
+ std::unique_ptr<ScopedCOMInitializer> com_initializer_;
+#endif
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_QUALITY_TEST_H_
diff --git a/third_party/libwebrtc/video/video_receive_stream2.cc b/third_party/libwebrtc/video/video_receive_stream2.cc
new file mode 100644
index 0000000000..beb894e139
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream2.cc
@@ -0,0 +1,1112 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_receive_stream2.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/encoded_image.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "call/rtp_stream_receiver_controller_interface.h"
+#include "call/rtx_receive_stream.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/timing/timing.h"
+#include "modules/video_coding/utility/vp8_header_parser.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+#include "video/call_stats2.h"
+#include "video/frame_dumping_decoder.h"
+#include "video/receive_statistics_proxy2.h"
+#include "video/render/incoming_video_stream.h"
+#include "video/task_queue_frame_decode_scheduler.h"
+
+namespace webrtc {
+
+namespace internal {
+
+namespace {
+
+// Bounds for the base minimum playout delay that can be set via
+// SetBaseMinimumPlayoutDelayMs().
+constexpr TimeDelta kMinBaseMinimumDelay = TimeDelta::Zero();
+constexpr TimeDelta kMaxBaseMinimumDelay = TimeDelta::Seconds(10);
+
+// Concrete instance of RecordableEncodedFrame wrapping the content needed
+// from an EncodedFrame.
+class WebRtcRecordableEncodedFrame : public RecordableEncodedFrame {
+ public:
+ explicit WebRtcRecordableEncodedFrame(
+ const EncodedFrame& frame,
+ RecordableEncodedFrame::EncodedResolution resolution)
+ : buffer_(frame.GetEncodedData()),
+ render_time_ms_(frame.RenderTime()),
+ codec_(frame.CodecSpecific()->codecType),
+ is_key_frame_(frame.FrameType() == VideoFrameType::kVideoFrameKey),
+ resolution_(resolution) {
+ if (frame.ColorSpace()) {
+ color_space_ = *frame.ColorSpace();
+ }
+ }
+
+ // VideoEncodedSinkInterface::FrameBuffer
+ rtc::scoped_refptr<const EncodedImageBufferInterface> encoded_buffer()
+ const override {
+ return buffer_;
+ }
+
+ absl::optional<webrtc::ColorSpace> color_space() const override {
+ return color_space_;
+ }
+
+ VideoCodecType codec() const override { return codec_; }
+
+ bool is_key_frame() const override { return is_key_frame_; }
+
+ EncodedResolution resolution() const override { return resolution_; }
+
+ Timestamp render_time() const override {
+ return Timestamp::Millis(render_time_ms_);
+ }
+
+ private:
+ rtc::scoped_refptr<EncodedImageBufferInterface> buffer_;
+ int64_t render_time_ms_;
+ VideoCodecType codec_;
+ bool is_key_frame_;
+ EncodedResolution resolution_;
+ absl::optional<webrtc::ColorSpace> color_space_;
+};
+
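+// Reads the initial decoder resolution from the
+// "WebRTC-Video-InitialDecoderResolution" field trial, falling back to
+// 320x180. As a hypothetical example, a trial string of
+// "WebRTC-Video-InitialDecoderResolution/w:1280,h:720/" would yield 1280x720.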
+RenderResolution InitialDecoderResolution(const FieldTrialsView& field_trials) {
+ FieldTrialOptional<int> width("w");
+ FieldTrialOptional<int> height("h");
+ ParseFieldTrial({&width, &height},
+ field_trials.Lookup("WebRTC-Video-InitialDecoderResolution"));
+ if (width && height) {
+ return RenderResolution(width.Value(), height.Value());
+ }
+
+ return RenderResolution(320, 180);
+}
+
+// Video decoder class to be used for unknown codecs. Doesn't support decoding
+// but logs messages to LS_ERROR.
+class NullVideoDecoder : public webrtc::VideoDecoder {
+ public:
+ bool Configure(const Settings& settings) override {
+ RTC_LOG(LS_ERROR) << "Can't initialize NullVideoDecoder.";
+ return true;
+ }
+
+ int32_t Decode(const webrtc::EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override {
+ RTC_LOG(LS_ERROR) << "The NullVideoDecoder doesn't support decoding.";
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ int32_t RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback* callback) override {
+ RTC_LOG(LS_ERROR)
+ << "Can't register decode complete callback on NullVideoDecoder.";
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; }
+
+ const char* ImplementationName() const override { return "NullVideoDecoder"; }
+};
+
+bool IsKeyFrameAndUnspecifiedResolution(const EncodedFrame& frame) {
+ return frame.FrameType() == VideoFrameType::kVideoFrameKey &&
+ frame.EncodedImage()._encodedWidth == 0 &&
+ frame.EncodedImage()._encodedHeight == 0;
+}
+
+std::string OptionalDelayToLogString(const absl::optional<TimeDelta> opt) {
+ return opt.has_value() ? ToLogString(*opt) : "<unset>";
+}
+
+} // namespace
+
+TimeDelta DetermineMaxWaitForFrame(TimeDelta rtp_history, bool is_keyframe) {
+ // An (arbitrary) conversion factor between the remotely signalled NACK
+ // buffer time (defaults to 1000 ms if not present) and the maximum time we
+ // wait for a remote frame. Chosen so as not to change the existing
+ // defaults when rtx-time is not signalled.
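+ // Worked example, assuming the constants referenced below: a signalled
+ // rtx-time of 1000 ms yields a 1000 ms wait for key frames and
+ // 3 * 1000 ms = 3000 ms for delta frames, provided 3000 ms is below
+ // kMaxWaitForFrame; without rtx-time, kMaxWaitForKeyFrame and
+ // kMaxWaitForFrame apply.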
+ const int conversion_factor = 3;
+ if (rtp_history > TimeDelta::Zero() &&
+ conversion_factor * rtp_history < kMaxWaitForFrame) {
+ return is_keyframe ? rtp_history : conversion_factor * rtp_history;
+ }
+ return is_keyframe ? kMaxWaitForKeyFrame : kMaxWaitForFrame;
+}
+
+VideoReceiveStream2::VideoReceiveStream2(
+ TaskQueueFactory* task_queue_factory,
+ Call* call,
+ int num_cpu_cores,
+ PacketRouter* packet_router,
+ VideoReceiveStreamInterface::Config config,
+ CallStats* call_stats,
+ Clock* clock,
+ std::unique_ptr<VCMTiming> timing,
+ NackPeriodicProcessor* nack_periodic_processor,
+ DecodeSynchronizer* decode_sync,
+ RtcEventLog* event_log)
+ : task_queue_factory_(task_queue_factory),
+ transport_adapter_(config.rtcp_send_transport),
+ config_(std::move(config)),
+ num_cpu_cores_(num_cpu_cores),
+ call_(call),
+ clock_(clock),
+ call_stats_(call_stats),
+ source_tracker_(clock_),
+ stats_proxy_(remote_ssrc(), clock_, call->worker_thread()),
+ rtp_receive_statistics_(ReceiveStatistics::Create(clock_)),
+ timing_(std::move(timing)),
+ video_receiver_(clock_, timing_.get(), call->trials()),
+ rtp_video_stream_receiver_(call->worker_thread(),
+ clock_,
+ &transport_adapter_,
+ call_stats->AsRtcpRttStats(),
+ packet_router,
+ &config_,
+ rtp_receive_statistics_.get(),
+ &stats_proxy_,
+ &stats_proxy_,
+ nack_periodic_processor,
+ &stats_proxy_,
+ this, // OnCompleteFrameCallback
+ std::move(config_.frame_decryptor),
+ std::move(config_.frame_transformer),
+ call->trials(),
+ event_log),
+ rtp_stream_sync_(call->worker_thread(), this),
+ max_wait_for_keyframe_(DetermineMaxWaitForFrame(
+ TimeDelta::Millis(config_.rtp.nack.rtp_history_ms),
+ true)),
+ max_wait_for_frame_(DetermineMaxWaitForFrame(
+ TimeDelta::Millis(config_.rtp.nack.rtp_history_ms),
+ false)),
+ decode_queue_(task_queue_factory_->CreateTaskQueue(
+ "DecodingQueue",
+ TaskQueueFactory::Priority::HIGH)) {
+ RTC_LOG(LS_INFO) << "VideoReceiveStream2: " << config_.ToString();
+
+ RTC_DCHECK(call_->worker_thread());
+ RTC_DCHECK(config_.renderer);
+ RTC_DCHECK(call_stats_);
+ packet_sequence_checker_.Detach();
+
+ RTC_DCHECK(!config_.decoders.empty());
+ RTC_CHECK(config_.decoder_factory);
+ std::set<int> decoder_payload_types;
+ for (const Decoder& decoder : config_.decoders) {
+ RTC_CHECK(decoder_payload_types.find(decoder.payload_type) ==
+ decoder_payload_types.end())
+ << "Duplicate payload type (" << decoder.payload_type
+ << ") for different decoders.";
+ decoder_payload_types.insert(decoder.payload_type);
+ }
+
+ timing_->set_render_delay(TimeDelta::Millis(config_.render_delay_ms));
+
+ std::unique_ptr<FrameDecodeScheduler> scheduler =
+ decode_sync ? decode_sync->CreateSynchronizedFrameScheduler()
+ : std::make_unique<TaskQueueFrameDecodeScheduler>(
+ clock, call_->worker_thread());
+ buffer_ = std::make_unique<VideoStreamBufferController>(
+ clock_, call_->worker_thread(), timing_.get(), &stats_proxy_, this,
+ max_wait_for_keyframe_, max_wait_for_frame_, std::move(scheduler),
+ call_->trials());
+
+ if (rtx_ssrc()) {
+ rtx_receive_stream_ = std::make_unique<RtxReceiveStream>(
+ &rtp_video_stream_receiver_,
+ std::move(config_.rtp.rtx_associated_payload_types), remote_ssrc(),
+ rtp_receive_statistics_.get());
+ } else {
+ rtp_receive_statistics_->EnableRetransmitDetection(remote_ssrc(), true);
+ }
+}
+
+VideoReceiveStream2::~VideoReceiveStream2() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ RTC_LOG(LS_INFO) << "~VideoReceiveStream2: " << config_.ToString();
+ RTC_DCHECK(!media_receiver_);
+ RTC_DCHECK(!rtx_receiver_);
+ Stop();
+}
+
+void VideoReceiveStream2::RegisterWithTransport(
+ RtpStreamReceiverControllerInterface* receiver_controller) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK(!media_receiver_);
+ RTC_DCHECK(!rtx_receiver_);
+
+ // Register with RtpStreamReceiverController.
+ media_receiver_ = receiver_controller->CreateReceiver(
+ remote_ssrc(), &rtp_video_stream_receiver_);
+ if (rtx_ssrc()) {
+ RTC_DCHECK(rtx_receive_stream_);
+ rtx_receiver_ = receiver_controller->CreateReceiver(
+ rtx_ssrc(), rtx_receive_stream_.get());
+ }
+}
+
+void VideoReceiveStream2::UnregisterFromTransport() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ media_receiver_.reset();
+ rtx_receiver_.reset();
+}
+
+const std::string& VideoReceiveStream2::sync_group() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return config_.sync_group;
+}
+
+void VideoReceiveStream2::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ rtp_video_stream_receiver_.SignalNetworkState(state);
+}
+
+bool VideoReceiveStream2::DeliverRtcp(const uint8_t* packet, size_t length) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return rtp_video_stream_receiver_.DeliverRtcp(packet, length);
+}
+
+void VideoReceiveStream2::SetSync(Syncable* audio_syncable) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_stream_sync_.ConfigureSync(audio_syncable);
+}
+
+void VideoReceiveStream2::SetLocalSsrc(uint32_t local_ssrc) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (config_.rtp.local_ssrc == local_ssrc)
+ return;
+
+ // TODO(tommi): Make sure we don't rely on local_ssrc via the config struct.
+ const_cast<uint32_t&>(config_.rtp.local_ssrc) = local_ssrc;
+ rtp_video_stream_receiver_.OnLocalSsrcChange(local_ssrc);
+}
+
+void VideoReceiveStream2::Start() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+
+ if (decoder_running_) {
+ return;
+ }
+
+ const bool protected_by_fec =
+ config_.rtp.protected_by_flexfec ||
+ rtp_video_stream_receiver_.ulpfec_payload_type() != -1;
+
+ if (config_.rtp.nack.rtp_history_ms > 0 && protected_by_fec) {
+ buffer_->SetProtectionMode(kProtectionNackFEC);
+ }
+
+ transport_adapter_.Enable();
+ rtc::VideoSinkInterface<VideoFrame>* renderer = nullptr;
+ if (config_.enable_prerenderer_smoothing) {
+ incoming_video_stream_.reset(new IncomingVideoStream(
+ task_queue_factory_, config_.render_delay_ms, this));
+ renderer = incoming_video_stream_.get();
+ } else {
+ renderer = this;
+ }
+
+ for (const Decoder& decoder : config_.decoders) {
+ VideoDecoder::Settings settings;
+ settings.set_codec_type(
+ PayloadStringToCodecType(decoder.video_format.name));
+ settings.set_max_render_resolution(
+ InitialDecoderResolution(call_->trials()));
+ settings.set_number_of_cores(num_cpu_cores_);
+
+ const bool raw_payload =
+ config_.rtp.raw_payload_types.count(decoder.payload_type) > 0;
+ {
+ // TODO(bugs.webrtc.org/11993): Make this call on the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.AddReceiveCodec(
+ decoder.payload_type, settings.codec_type(),
+ decoder.video_format.parameters, raw_payload);
+ }
+ video_receiver_.RegisterReceiveCodec(decoder.payload_type, settings);
+ }
+
+ RTC_DCHECK(renderer != nullptr);
+ video_stream_decoder_.reset(
+ new VideoStreamDecoder(&video_receiver_, &stats_proxy_, renderer));
+
+ // Make sure we register as a stats observer *after* we've prepared the
+ // `video_stream_decoder_`.
+ call_stats_->RegisterStatsObserver(this);
+
+ // Start decoding on task queue.
+ stats_proxy_.DecoderThreadStarting();
+ decode_queue_.PostTask([this] {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ decoder_stopped_ = false;
+ });
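+ // Kick off the decode loop; `true` means the first frame handed to the
+ // decoder must be a key frame (see also StartNextDecode(keyframe_required_)
+ // in the frame callbacks below).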
+ buffer_->StartNextDecode(true);
+ decoder_running_ = true;
+
+ {
+ // TODO(bugs.webrtc.org/11993): Make this call on the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.StartReceive();
+ }
+}
+
+void VideoReceiveStream2::Stop() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+
+ // TODO(bugs.webrtc.org/11993): Make this call on the network thread.
+ // Also call `GetUniqueFramesSeen()` at the same time (since it's a counter
+ // that's updated on the network thread).
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.StopReceive();
+
+ stats_proxy_.OnUniqueFramesCounted(
+ rtp_video_stream_receiver_.GetUniqueFramesSeen());
+
+ buffer_->Stop();
+ call_stats_->DeregisterStatsObserver(this);
+
+ if (decoder_running_) {
+ rtc::Event done;
+ decode_queue_.PostTask([this, &done] {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ // Set `decoder_stopped_` before deregistering all decoders. This means
+ // that any pending encoded frame will return early without trying to
+ // access the decoder database.
+ decoder_stopped_ = true;
+ for (const Decoder& decoder : config_.decoders) {
+ video_receiver_.RegisterExternalDecoder(nullptr, decoder.payload_type);
+ }
+ done.Set();
+ });
+ done.Wait(rtc::Event::kForever);
+
+ decoder_running_ = false;
+ stats_proxy_.DecoderThreadStopped();
+
+ UpdateHistograms();
+ }
+
+ // TODO(bugs.webrtc.org/11993): Make these calls on the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.RemoveReceiveCodecs();
+ video_receiver_.DeregisterReceiveCodecs();
+
+ video_stream_decoder_.reset();
+ incoming_video_stream_.reset();
+ transport_adapter_.Disable();
+}
+
+void VideoReceiveStream2::SetRtpExtensions(
+ std::vector<RtpExtension> extensions) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.SetRtpExtensions(extensions);
+ // TODO(tommi): We don't use the `config_.rtp.extensions` member in the
+ // VideoReceiveStream2 class, so this const_cast<> is a temporary hack to
+ // keep things consistent between VideoReceiveStream2 and
+ // RtpVideoStreamReceiver2 for debugging purposes. The
+ // `packet_sequence_checker_` gives us assurance that, from a threading
+ // perspective, this is still safe. The accessors that give read access to
+ // this state run behind the same check.
+ // The alternative to the const_cast<> would be to make `config_` non-const
+ // and guarded by `packet_sequence_checker_`. However, the scope of that
+ // state is huge (the whole Config struct), and it would require all methods
+ // that touch the struct to abide by the needs of the `extensions` member.
+ const_cast<std::vector<RtpExtension>&>(config_.rtp.extensions) =
+ std::move(extensions);
+}
+
+RtpHeaderExtensionMap VideoReceiveStream2::GetRtpExtensionMap() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return rtp_video_stream_receiver_.GetRtpExtensions();
+}
+
+void VideoReceiveStream2::SetRtcpMode(RtcpMode mode) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ // TODO(tommi): Stop using the config struct for the internal state.
+ const_cast<RtcpMode&>(config_.rtp.rtcp_mode) = mode;
+ rtp_video_stream_receiver_.SetRtcpMode(mode);
+}
+
+void VideoReceiveStream2::SetFlexFecProtection(
+ RtpPacketSinkInterface* flexfec_sink) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.SetPacketSink(flexfec_sink);
+ // TODO(tommi): Stop using the config struct for the internal state.
+ const_cast<RtpPacketSinkInterface*&>(config_.rtp.packet_sink_) = flexfec_sink;
+ const_cast<bool&>(config_.rtp.protected_by_flexfec) =
+ (flexfec_sink != nullptr);
+}
+
+void VideoReceiveStream2::SetLossNotificationEnabled(bool enabled) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ // TODO(tommi): Stop using the config struct for the internal state.
+ const_cast<bool&>(config_.rtp.lntf.enabled) = enabled;
+ rtp_video_stream_receiver_.SetLossNotificationEnabled(enabled);
+}
+
+void VideoReceiveStream2::SetNackHistory(TimeDelta history) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK_GE(history.ms(), 0);
+
+ if (config_.rtp.nack.rtp_history_ms == history.ms())
+ return;
+
+ // TODO(tommi): Stop using the config struct for the internal state.
+ const_cast<int&>(config_.rtp.nack.rtp_history_ms) = history.ms();
+
+ const bool protected_by_fec =
+ config_.rtp.protected_by_flexfec ||
+ rtp_video_stream_receiver_.ulpfec_payload_type() != -1;
+
+ buffer_->SetProtectionMode(history.ms() > 0 && protected_by_fec
+ ? kProtectionNackFEC
+ : kProtectionNack);
+
+ rtp_video_stream_receiver_.SetNackHistory(history);
+ TimeDelta max_wait_for_keyframe = DetermineMaxWaitForFrame(history, true);
+ TimeDelta max_wait_for_frame = DetermineMaxWaitForFrame(history, false);
+
+ max_wait_for_keyframe_ = max_wait_for_keyframe;
+ max_wait_for_frame_ = max_wait_for_frame;
+
+ buffer_->SetMaxWaits(max_wait_for_keyframe, max_wait_for_frame);
+}
+
+void VideoReceiveStream2::SetProtectionPayloadTypes(int red_payload_type,
+ int ulpfec_payload_type) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.SetProtectionPayloadTypes(red_payload_type,
+ ulpfec_payload_type);
+}
+
+void VideoReceiveStream2::SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.SetReferenceTimeReport(
+ rtcp_xr.receiver_reference_time_report);
+}
+
+void VideoReceiveStream2::SetAssociatedPayloadTypes(
+ std::map<int, int> associated_payload_types) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+
+ // For setting the associated payload types after construction, we
+ // currently assume that the rtx_ssrc cannot change. In that case we know
+ // that if the ssrc is non-zero, an `rtx_receive_stream_` instance has
+ // previously been created and configured (and is referenced by
+ // `rtx_receiver_`), and we can simply reconfigure it. If rtx_ssrc is 0,
+ // however, we ignore this call.
+ if (!rtx_ssrc())
+ return;
+
+ rtx_receive_stream_->SetAssociatedPayloadTypes(
+ std::move(associated_payload_types));
+}
+
+void VideoReceiveStream2::CreateAndRegisterExternalDecoder(
+ const Decoder& decoder) {
+ TRACE_EVENT0("webrtc",
+ "VideoReceiveStream2::CreateAndRegisterExternalDecoder");
+ std::unique_ptr<VideoDecoder> video_decoder =
+ config_.decoder_factory->CreateVideoDecoder(decoder.video_format);
+ // If we still have no valid decoder, we have to create a "Null" decoder
+ // that ignores all calls. The reason we can get into this state is that the
+ // old decoder factory interface doesn't have a way to query supported
+ // codecs.
+ if (!video_decoder) {
+ video_decoder = std::make_unique<NullVideoDecoder>();
+ }
+
+ std::string decoded_output_file =
+ call_->trials().Lookup("WebRTC-DecoderDataDumpDirectory");
+ // Because '/' can't be used inside a field trial parameter, we use ';'
+ // instead. This is only relevant to the WebRTC-DecoderDataDumpDirectory
+ // field trial. ';' is chosen arbitrarily; even though it's a legal
+ // character in some file systems, we can sacrifice the ability to use it
+ // in the path to the dumped video, since this is a developers-only
+ // debugging feature.
+ absl::c_replace(decoded_output_file, ';', '/');
+ if (!decoded_output_file.empty()) {
+ char filename_buffer[256];
+ rtc::SimpleStringBuilder ssb(filename_buffer);
+ ssb << decoded_output_file << "/webrtc_receive_stream_" << remote_ssrc()
+ << "-" << rtc::TimeMicros() << ".ivf";
+ video_decoder = CreateFrameDumpingDecoderWrapper(
+ std::move(video_decoder), FileWrapper::OpenWriteOnly(ssb.str()));
+ }
+
+ video_receiver_.RegisterExternalDecoder(std::move(video_decoder),
+ decoder.payload_type);
+}
+
+VideoReceiveStreamInterface::Stats VideoReceiveStream2::GetStats() const {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ VideoReceiveStream2::Stats stats = stats_proxy_.GetStats();
+ stats.total_bitrate_bps = 0;
+ StreamStatistician* statistician =
+ rtp_receive_statistics_->GetStatistician(stats.ssrc);
+ if (statistician) {
+ stats.rtp_stats = statistician->GetStats();
+ stats.total_bitrate_bps = statistician->BitrateReceived();
+ }
+ if (rtx_ssrc()) {
+ StreamStatistician* rtx_statistician =
+ rtp_receive_statistics_->GetStatistician(rtx_ssrc());
+ if (rtx_statistician)
+ stats.total_bitrate_bps += rtx_statistician->BitrateReceived();
+ }
+
+ // Mozilla modification: VideoReceiveStream2 and friends do not surface RTCP
+ // stats at all, and even on the most recent libwebrtc code there does not
+ // seem to be any support for these stats right now. So, we hack this in.
+ rtp_video_stream_receiver_.RemoteRTCPSenderInfo(
+ &stats.rtcp_sender_packets_sent, &stats.rtcp_sender_octets_sent,
+ &stats.rtcp_sender_ntp_timestamp_ms,
+ &stats.rtcp_sender_remote_ntp_timestamp_ms);
+
+ return stats;
+}
+
+void VideoReceiveStream2::UpdateHistograms() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ absl::optional<int> fraction_lost;
+ StreamDataCounters rtp_stats;
+ StreamStatistician* statistician =
+ rtp_receive_statistics_->GetStatistician(remote_ssrc());
+ if (statistician) {
+ fraction_lost = statistician->GetFractionLostInPercent();
+ rtp_stats = statistician->GetReceiveStreamDataCounters();
+ }
+ if (rtx_ssrc()) {
+ StreamStatistician* rtx_statistician =
+ rtp_receive_statistics_->GetStatistician(rtx_ssrc());
+ if (rtx_statistician) {
+ StreamDataCounters rtx_stats =
+ rtx_statistician->GetReceiveStreamDataCounters();
+ stats_proxy_.UpdateHistograms(fraction_lost, rtp_stats, &rtx_stats);
+ return;
+ }
+ }
+ stats_proxy_.UpdateHistograms(fraction_lost, rtp_stats, nullptr);
+}
+
+bool VideoReceiveStream2::SetBaseMinimumPlayoutDelayMs(int delay_ms) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ TimeDelta delay = TimeDelta::Millis(delay_ms);
+ if (delay < kMinBaseMinimumDelay || delay > kMaxBaseMinimumDelay) {
+ return false;
+ }
+
+ base_minimum_playout_delay_ = delay;
+ UpdatePlayoutDelays();
+ return true;
+}
+
+int VideoReceiveStream2::GetBaseMinimumPlayoutDelayMs() const {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ constexpr TimeDelta kDefaultBaseMinPlayoutDelay = TimeDelta::Millis(-1);
+ // Unset must be -1.
+ static_assert(-1 == kDefaultBaseMinPlayoutDelay.ms(), "");
+ return base_minimum_playout_delay_.value_or(kDefaultBaseMinPlayoutDelay).ms();
+}
+
+void VideoReceiveStream2::OnFrame(const VideoFrame& video_frame) {
+ source_tracker_.OnFrameDelivered(video_frame.packet_infos());
+ config_.renderer->OnFrame(video_frame);
+
+ // TODO(bugs.webrtc.org/10739): we should set local capture clock offset for
+ // `video_frame.packet_infos`. But VideoFrame is const qualified here.
+
+ // For the frame delay metrics calculated in `OnRenderedFrame` to better
+ // reflect user experience, the measurement must be taken as close as
+ // possible to the moment of rendering. Capture the current time, used for
+ // the delay calculation in `OnRenderedFrame`, right after the frame is
+ // passed to the renderer. The frame may or may not have been rendered by
+ // this time; this introduces some inaccuracy, but it is the best we can do
+ // in the absence of a "frame rendered" callback from the renderer.
+ VideoFrameMetaData frame_meta(video_frame, clock_->CurrentTime());
+ call_->worker_thread()->PostTask(
+ SafeTask(task_safety_.flag(), [frame_meta, this]() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ int64_t video_playout_ntp_ms;
+ int64_t sync_offset_ms;
+ double estimated_freq_khz;
+ if (rtp_stream_sync_.GetStreamSyncOffsetInMs(
+ frame_meta.rtp_timestamp, frame_meta.render_time_ms(),
+ &video_playout_ntp_ms, &sync_offset_ms, &estimated_freq_khz)) {
+ stats_proxy_.OnSyncOffsetUpdated(video_playout_ntp_ms, sync_offset_ms,
+ estimated_freq_khz);
+ }
+ stats_proxy_.OnRenderedFrame(frame_meta);
+ }));
+
+ webrtc::MutexLock lock(&pending_resolution_mutex_);
+ if (pending_resolution_.has_value()) {
+ if (!pending_resolution_->empty() &&
+ (video_frame.width() != static_cast<int>(pending_resolution_->width) ||
+ video_frame.height() !=
+ static_cast<int>(pending_resolution_->height))) {
+ RTC_LOG(LS_WARNING)
+ << "Recordable encoded frame stream resolution was reported as "
+ << pending_resolution_->width << "x" << pending_resolution_->height
+ << " but the stream is now " << video_frame.width()
+ << video_frame.height();
+ }
+ pending_resolution_ = RecordableEncodedFrame::EncodedResolution{
+ static_cast<unsigned>(video_frame.width()),
+ static_cast<unsigned>(video_frame.height())};
+ }
+}
+
+void VideoReceiveStream2::SetFrameDecryptor(
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
+ rtp_video_stream_receiver_.SetFrameDecryptor(std::move(frame_decryptor));
+}
+
+void VideoReceiveStream2::SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
+ rtp_video_stream_receiver_.SetDepacketizerToDecoderFrameTransformer(
+ std::move(frame_transformer));
+}
+
+void VideoReceiveStream2::RequestKeyFrame(Timestamp now) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ // Called from RtpVideoStreamReceiver (rtp_video_stream_receiver_ is
+ // ultimately responsible).
+ rtp_video_stream_receiver_.RequestKeyFrame();
+ last_keyframe_request_ = now;
+}
+
+void VideoReceiveStream2::OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+
+ const VideoPlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_;
+ if (playout_delay.min_ms >= 0) {
+ frame_minimum_playout_delay_ = TimeDelta::Millis(playout_delay.min_ms);
+ UpdatePlayoutDelays();
+ }
+ if (playout_delay.max_ms >= 0) {
+ frame_maximum_playout_delay_ = TimeDelta::Millis(playout_delay.max_ms);
+ UpdatePlayoutDelays();
+ }
+
+ auto last_continuous_pid = buffer_->InsertFrame(std::move(frame));
+ if (last_continuous_pid.has_value()) {
+ {
+ // TODO(bugs.webrtc.org/11993): Call on the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_video_stream_receiver_.FrameContinuous(*last_continuous_pid);
+ }
+ }
+}
+
+void VideoReceiveStream2::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ // TODO(bugs.webrtc.org/13757): Replace with TimeDelta.
+ buffer_->UpdateRtt(max_rtt_ms);
+ rtp_video_stream_receiver_.UpdateRtt(max_rtt_ms);
+ stats_proxy_.OnRttUpdate(avg_rtt_ms);
+}
+
+uint32_t VideoReceiveStream2::id() const {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ return remote_ssrc();
+}
+
+absl::optional<Syncable::Info> VideoReceiveStream2::GetInfo() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ absl::optional<Syncable::Info> info =
+ rtp_video_stream_receiver_.GetSyncInfo();
+
+ if (!info)
+ return absl::nullopt;
+
+ info->current_delay_ms = timing_->TargetVideoDelay().ms();
+ return info;
+}
+
+bool VideoReceiveStream2::GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp,
+ int64_t* time_ms) const {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+}
+
+void VideoReceiveStream2::SetEstimatedPlayoutNtpTimestampMs(
+ int64_t ntp_timestamp_ms,
+ int64_t time_ms) {
+ RTC_DCHECK_NOTREACHED();
+}
+
+bool VideoReceiveStream2::SetMinimumPlayoutDelay(int delay_ms) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ syncable_minimum_playout_delay_ = TimeDelta::Millis(delay_ms);
+ UpdatePlayoutDelays();
+ return true;
+}
+
+void VideoReceiveStream2::OnEncodedFrame(std::unique_ptr<EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ Timestamp now = clock_->CurrentTime();
+ const bool keyframe_request_is_due =
+ !last_keyframe_request_ ||
+ now >= (*last_keyframe_request_ + max_wait_for_keyframe_);
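+ // A new keyframe request is due if none has been sent yet, or if the
+ // previous one was sent more than `max_wait_for_keyframe_` ago.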
+ const bool received_frame_is_keyframe =
+ frame->FrameType() == VideoFrameType::kVideoFrameKey;
+
+ // Current OnPreDecode only cares about QP for VP8.
+ int qp = -1;
+ if (frame->CodecSpecific()->codecType == kVideoCodecVP8) {
+ if (!vp8::GetQp(frame->data(), frame->size(), &qp)) {
+ RTC_LOG(LS_WARNING) << "Failed to extract QP from VP8 video frame";
+ }
+ }
+ stats_proxy_.OnPreDecode(frame->CodecSpecific()->codecType, qp);
+
+ decode_queue_.PostTask([this, now, keyframe_request_is_due,
+ received_frame_is_keyframe, frame = std::move(frame),
+ keyframe_required = keyframe_required_]() mutable {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ if (decoder_stopped_)
+ return;
+ DecodeFrameResult result = HandleEncodedFrameOnDecodeQueue(
+ std::move(frame), keyframe_request_is_due, keyframe_required);
+
+ // TODO(bugs.webrtc.org/11993): Make this PostTask to the network thread.
+ call_->worker_thread()->PostTask(
+ SafeTask(task_safety_.flag(),
+ [this, now, result = std::move(result),
+ received_frame_is_keyframe, keyframe_request_is_due]() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ keyframe_required_ = result.keyframe_required;
+
+ if (result.decoded_frame_picture_id) {
+ rtp_video_stream_receiver_.FrameDecoded(
+ *result.decoded_frame_picture_id);
+ }
+
+ HandleKeyFrameGeneration(received_frame_is_keyframe, now,
+ result.force_request_key_frame,
+ keyframe_request_is_due);
+ buffer_->StartNextDecode(keyframe_required_);
+ }));
+ });
+}
+
+void VideoReceiveStream2::OnDecodableFrameTimeout(TimeDelta wait) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ Timestamp now = clock_->CurrentTime();
+
+ absl::optional<int64_t> last_packet_ms =
+ rtp_video_stream_receiver_.LastReceivedPacketMs();
+
+ // To avoid spamming keyframe requests for a stream that is not active, we
+ // check whether we have received a packet within the last 5 seconds.
+ constexpr TimeDelta kInactiveDuration = TimeDelta::Seconds(5);
+ const bool stream_is_active =
+ last_packet_ms &&
+ now - Timestamp::Millis(*last_packet_ms) < kInactiveDuration;
+ if (!stream_is_active)
+ stats_proxy_.OnStreamInactive();
+
+ if (stream_is_active && !IsReceivingKeyFrame(now) &&
+ (!config_.crypto_options.sframe.require_frame_encryption ||
+ rtp_video_stream_receiver_.IsDecryptable())) {
+ RTC_LOG(LS_WARNING) << "No decodable frame in " << wait
+ << ", requesting keyframe.";
+ RequestKeyFrame(now);
+ }
+
+ buffer_->StartNextDecode(keyframe_required_);
+}
+
+VideoReceiveStream2::DecodeFrameResult
+VideoReceiveStream2::HandleEncodedFrameOnDecodeQueue(
+ std::unique_ptr<EncodedFrame> frame,
+ bool keyframe_request_is_due,
+ bool keyframe_required) {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+
+ bool force_request_key_frame = false;
+ absl::optional<int64_t> decoded_frame_picture_id;
+
+ if (!video_receiver_.IsExternalDecoderRegistered(frame->PayloadType())) {
+ // Look for the decoder with this payload type.
+ for (const Decoder& decoder : config_.decoders) {
+ if (decoder.payload_type == frame->PayloadType()) {
+ CreateAndRegisterExternalDecoder(decoder);
+ break;
+ }
+ }
+ }
+
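+  // Capture the frame id before `frame` is moved into the decode call below.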
+ int64_t frame_id = frame->Id();
+ int decode_result = DecodeAndMaybeDispatchEncodedFrame(std::move(frame));
+ if (decode_result == WEBRTC_VIDEO_CODEC_OK ||
+ decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
+ keyframe_required = false;
+ frame_decoded_ = true;
+
+ decoded_frame_picture_id = frame_id;
+
+ if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME)
+ force_request_key_frame = true;
+ } else if (!frame_decoded_ || !keyframe_required || keyframe_request_is_due) {
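+    // The decode failed; require a keyframe before decoding can resume.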
+ keyframe_required = true;
+ // TODO(philipel): Remove this keyframe request when downstream project
+ // has been fixed.
+ force_request_key_frame = true;
+ }
+
+ return DecodeFrameResult{
+ .force_request_key_frame = force_request_key_frame,
+ .decoded_frame_picture_id = std::move(decoded_frame_picture_id),
+ .keyframe_required = keyframe_required,
+ };
+}
+
+int VideoReceiveStream2::DecodeAndMaybeDispatchEncodedFrame(
+ std::unique_ptr<EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+
+  // If `buffered_encoded_frames_` grows out of control (reaches 60 queued
+  // frames), for example due to a stuck decoder, we stop buffering frames for
+  // recordable output here and log the error.
+ const bool encoded_frame_output_enabled =
+ encoded_frame_buffer_function_ != nullptr &&
+ buffered_encoded_frames_.size() < kBufferedEncodedFramesMaxSize;
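+  // Keep a raw pointer to the frame since ownership may move into
+  // `buffered_encoded_frames_` below before the decoder is invoked.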
+ EncodedFrame* frame_ptr = frame.get();
+ if (encoded_frame_output_enabled) {
+    // If we receive a key frame with unset resolution, hold off dispatching
+    // the frame and the following ones until the stream's resolution is known.
+    // NOTE: The code below has a race where it can report the wrong
+    // resolution for keyframes that follow an initial keyframe with a
+    // different resolution. However, the only known consumer of this
+    // information is the W3C MediaRecorder, and it only uses the resolution
+    // of the first encoded keyframe from WebRTC, so misreporting is fine.
+ buffered_encoded_frames_.push_back(std::move(frame));
+ if (buffered_encoded_frames_.size() == kBufferedEncodedFramesMaxSize)
+ RTC_LOG(LS_ERROR) << "About to halt recordable encoded frame output due "
+ "to too many buffered frames.";
+
+ webrtc::MutexLock lock(&pending_resolution_mutex_);
+ if (IsKeyFrameAndUnspecifiedResolution(*frame_ptr) &&
+ !pending_resolution_.has_value())
+ pending_resolution_.emplace();
+ }
+
+ int decode_result = video_receiver_.Decode(frame_ptr);
+ if (encoded_frame_output_enabled) {
+ absl::optional<RecordableEncodedFrame::EncodedResolution>
+ pending_resolution;
+ {
+      // Copy `pending_resolution_` out so that the flush loop below neither
+      // takes the mutex on every iteration nor dispatches while holding it.
+ webrtc::MutexLock lock(&pending_resolution_mutex_);
+ if (pending_resolution_.has_value())
+ pending_resolution = *pending_resolution_;
+ }
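+    // Flush only when no resolution is pending, or when OnFrame has filled in
+    // the pending resolution (non-empty dimensions).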
+ if (!pending_resolution.has_value() || !pending_resolution->empty()) {
+ // Flush the buffered frames.
+ for (const auto& frame : buffered_encoded_frames_) {
+ RecordableEncodedFrame::EncodedResolution resolution{
+ frame->EncodedImage()._encodedWidth,
+ frame->EncodedImage()._encodedHeight};
+ if (IsKeyFrameAndUnspecifiedResolution(*frame)) {
+ RTC_DCHECK(!pending_resolution->empty());
+ resolution = *pending_resolution;
+ }
+ encoded_frame_buffer_function_(
+ WebRtcRecordableEncodedFrame(*frame, resolution));
+ }
+ buffered_encoded_frames_.clear();
+ }
+ }
+ return decode_result;
+}
+
+void VideoReceiveStream2::HandleKeyFrameGeneration(
+ bool received_frame_is_keyframe,
+ Timestamp now,
+ bool always_request_key_frame,
+ bool keyframe_request_is_due) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ bool request_key_frame = always_request_key_frame;
+
+ // Repeat sending keyframe requests if we've requested a keyframe.
+ if (keyframe_generation_requested_) {
+ if (received_frame_is_keyframe) {
+ keyframe_generation_requested_ = false;
+ } else if (keyframe_request_is_due) {
+ if (!IsReceivingKeyFrame(now)) {
+ request_key_frame = true;
+ }
+ } else {
+ // It hasn't been long enough since the last keyframe request, do nothing.
+ }
+ }
+
+ if (request_key_frame) {
+ // HandleKeyFrameGeneration is initiated from the decode thread -
+ // RequestKeyFrame() triggers a call back to the decode thread.
+ // Perhaps there's a way to avoid that.
+ RequestKeyFrame(now);
+ }
+}
+
+bool VideoReceiveStream2::IsReceivingKeyFrame(Timestamp now) const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ absl::optional<int64_t> last_keyframe_packet_ms =
+ rtp_video_stream_receiver_.LastReceivedKeyframePacketMs();
+
+  // If we have recently been receiving packets belonging to a keyframe, we
+  // assume a keyframe is currently being received.
+ bool receiving_keyframe = last_keyframe_packet_ms &&
+ now - Timestamp::Millis(*last_keyframe_packet_ms) <
+ max_wait_for_keyframe_;
+ return receiving_keyframe;
+}
+
+void VideoReceiveStream2::UpdatePlayoutDelays() const {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ const std::initializer_list<absl::optional<TimeDelta>> min_delays = {
+ frame_minimum_playout_delay_, base_minimum_playout_delay_,
+ syncable_minimum_playout_delay_};
+
+  // Since nullopt < anything, this will return the largest of the minimum
+ // delays, or nullopt if all are nullopt.
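+  // For example, {nullopt, 10ms, 20ms} yields 20ms, while
+  // {nullopt, nullopt, nullopt} yields nullopt.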
+ absl::optional<TimeDelta> minimum_delay = std::max(min_delays);
+ if (minimum_delay) {
+ auto num_playout_delays_set =
+ absl::c_count_if(min_delays, [](auto opt) { return opt.has_value(); });
+ if (num_playout_delays_set > 1 &&
+ timing_->min_playout_delay() != minimum_delay) {
+ RTC_LOG(LS_WARNING)
+ << "Multiple playout delays set. Actual delay value set to "
+        << *minimum_delay << " frame min delay="
+        << OptionalDelayToLogString(frame_minimum_playout_delay_)
+ << " base min delay="
+ << OptionalDelayToLogString(base_minimum_playout_delay_)
+ << " sync min delay="
+ << OptionalDelayToLogString(syncable_minimum_playout_delay_);
+ }
+ timing_->set_min_playout_delay(*minimum_delay);
+ if (frame_minimum_playout_delay_ == TimeDelta::Zero() &&
+ frame_maximum_playout_delay_ > TimeDelta::Zero()) {
+ // TODO(kron): Estimate frame rate from video stream.
+ constexpr Frequency kFrameRate = Frequency::Hertz(60);
+ // Convert playout delay in ms to number of frames.
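+      // For example, a 100 ms maximum delay at the assumed 60 fps gives
+      // lrint(0.1 s * 60 Hz) = 6 frames before subtracting buffered frames.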
+ int max_composition_delay_in_frames =
+ std::lrint(*frame_maximum_playout_delay_ * kFrameRate);
+ // Subtract frames in buffer.
+ max_composition_delay_in_frames =
+ std::max(max_composition_delay_in_frames - buffer_->Size(), 0);
+ timing_->SetMaxCompositionDelayInFrames(max_composition_delay_in_frames);
+ }
+ }
+
+ if (frame_maximum_playout_delay_) {
+ timing_->set_max_playout_delay(*frame_maximum_playout_delay_);
+ }
+}
+
+std::vector<webrtc::RtpSource> VideoReceiveStream2::GetSources() const {
+ return source_tracker_.GetSources();
+}
+
+VideoReceiveStream2::RecordingState
+VideoReceiveStream2::SetAndGetRecordingState(RecordingState state,
+ bool generate_key_frame) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
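+  // `event` blocks this call until the decode-queue task below has swapped in
+  // the new callback, so the old state can be returned synchronously.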
+ rtc::Event event;
+
+ // Save old state, set the new state.
+ RecordingState old_state;
+
+ absl::optional<Timestamp> last_keyframe_request;
+ {
+ // TODO(bugs.webrtc.org/11993): Post this to the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ last_keyframe_request = last_keyframe_request_;
+ last_keyframe_request_ =
+ generate_key_frame
+ ? clock_->CurrentTime()
+ : Timestamp::Millis(state.last_keyframe_request_ms.value_or(0));
+ }
+
+ decode_queue_.PostTask(
+ [this, &event, &old_state, callback = std::move(state.callback),
+ last_keyframe_request = std::move(last_keyframe_request)] {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ old_state.callback = std::move(encoded_frame_buffer_function_);
+ encoded_frame_buffer_function_ = std::move(callback);
+
+ old_state.last_keyframe_request_ms =
+ last_keyframe_request.value_or(Timestamp::Zero()).ms();
+
+ event.Set();
+ });
+
+ if (generate_key_frame) {
+ rtp_video_stream_receiver_.RequestKeyFrame();
+ {
+ // TODO(bugs.webrtc.org/11993): Post this to the network thread.
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ keyframe_generation_requested_ = true;
+ }
+ }
+
+ event.Wait(rtc::Event::kForever);
+ return old_state;
+}
+
+void VideoReceiveStream2::GenerateKeyFrame() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RequestKeyFrame(clock_->CurrentTime());
+ keyframe_generation_requested_ = true;
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_receive_stream2.h b/third_party/libwebrtc/video/video_receive_stream2.h
new file mode 100644
index 0000000000..44e2228dab
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream2.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_RECEIVE_STREAM2_H_
+#define VIDEO_VIDEO_RECEIVE_STREAM2_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/recordable_encoded_frame.h"
+#include "call/call.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "call/syncable.h"
+#include "call/video_receive_stream.h"
+#include "modules/rtp_rtcp/source/source_tracker.h"
+#include "modules/video_coding/nack_requester.h"
+#include "modules/video_coding/video_receiver2.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/receive_statistics_proxy2.h"
+#include "video/rtp_streams_synchronizer2.h"
+#include "video/rtp_video_stream_receiver2.h"
+#include "video/transport_adapter.h"
+#include "video/video_stream_buffer_controller.h"
+#include "video/video_stream_decoder2.h"
+
+namespace webrtc {
+
+class RtpStreamReceiverInterface;
+class RtpStreamReceiverControllerInterface;
+class RtxReceiveStream;
+class VCMTiming;
+
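+// Default frame-buffer timeouts. The keyframe wait may be overridden by field
+// trials (see `max_wait_for_keyframe_` below).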
+constexpr TimeDelta kMaxWaitForKeyFrame = TimeDelta::Millis(200);
+constexpr TimeDelta kMaxWaitForFrame = TimeDelta::Seconds(3);
+
+namespace internal {
+
+class CallStats;
+
+// Utility struct for grabbing metadata from a VideoFrame and processing it
+// asynchronously without needing the actual frame data.
+// Additionally, the caller can bundle information from the current clock
+// when the metadata is captured, which allows accurate reporting without
+// multiple calls to clock->Now().
+struct VideoFrameMetaData {
+ VideoFrameMetaData(const webrtc::VideoFrame& frame, Timestamp now)
+ : rtp_timestamp(frame.timestamp()),
+ timestamp_us(frame.timestamp_us()),
+ ntp_time_ms(frame.ntp_time_ms()),
+ width(frame.width()),
+ height(frame.height()),
+ decode_timestamp(now) {}
+
+ int64_t render_time_ms() const {
+ return timestamp_us / rtc::kNumMicrosecsPerMillisec;
+ }
+
+ const uint32_t rtp_timestamp;
+ const int64_t timestamp_us;
+ const int64_t ntp_time_ms;
+ const int width;
+ const int height;
+
+ const Timestamp decode_timestamp;
+};
+
+class VideoReceiveStream2
+ : public webrtc::VideoReceiveStreamInterface,
+ public rtc::VideoSinkInterface<VideoFrame>,
+ public RtpVideoStreamReceiver2::OnCompleteFrameCallback,
+ public Syncable,
+ public CallStatsObserver,
+ public FrameSchedulingReceiver {
+ public:
+ // The maximum number of buffered encoded frames when encoded output is
+ // configured.
+ static constexpr size_t kBufferedEncodedFramesMaxSize = 60;
+
+ VideoReceiveStream2(TaskQueueFactory* task_queue_factory,
+ Call* call,
+ int num_cpu_cores,
+ PacketRouter* packet_router,
+ VideoReceiveStreamInterface::Config config,
+ CallStats* call_stats,
+ Clock* clock,
+ std::unique_ptr<VCMTiming> timing,
+ NackPeriodicProcessor* nack_periodic_processor,
+ DecodeSynchronizer* decode_sync,
+ RtcEventLog* event_log);
+ // Destruction happens on the worker thread. Prior to destruction the caller
+ // must ensure that a registration with the transport has been cleared. See
+ // `RegisterWithTransport` for details.
+ // TODO(tommi): As a further improvement to this, performing the full
+ // destruction on the network thread could be made the default.
+ ~VideoReceiveStream2() override;
+
+ // Called on `packet_sequence_checker_` to register/unregister with the
+ // network transport.
+ void RegisterWithTransport(
+ RtpStreamReceiverControllerInterface* receiver_controller);
+ // If registration has previously been done (via `RegisterWithTransport`) then
+ // `UnregisterFromTransport` must be called prior to destruction, on the
+ // network thread.
+ void UnregisterFromTransport();
+
+ // Accessor for the a/v sync group. This value may change and the caller
+ // must be on the packet delivery thread.
+ const std::string& sync_group() const;
+
+ // Getters for const remote SSRC values that won't change throughout the
+ // object's lifetime.
+ uint32_t remote_ssrc() const { return config_.rtp.remote_ssrc; }
+ uint32_t rtx_ssrc() const { return config_.rtp.rtx_ssrc; }
+
+ void SignalNetworkState(NetworkState state);
+ bool DeliverRtcp(const uint8_t* packet, size_t length);
+
+ void SetSync(Syncable* audio_syncable);
+
+ // Updates the `rtp_video_stream_receiver_`'s `local_ssrc` when the default
+ // sender has been created, changed or removed.
+ void SetLocalSsrc(uint32_t local_ssrc);
+
+ // Implements webrtc::VideoReceiveStreamInterface.
+ void Start() override;
+ void Stop() override;
+
+ void SetRtpExtensions(std::vector<RtpExtension> extensions) override;
+ RtpHeaderExtensionMap GetRtpExtensionMap() const override;
+ void SetRtcpMode(RtcpMode mode) override;
+ void SetFlexFecProtection(RtpPacketSinkInterface* flexfec_sink) override;
+ void SetLossNotificationEnabled(bool enabled) override;
+ void SetNackHistory(TimeDelta history) override;
+ void SetProtectionPayloadTypes(int red_payload_type,
+ int ulpfec_payload_type) override;
+ void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) override;
+ void SetAssociatedPayloadTypes(
+ std::map<int, int> associated_payload_types) override;
+
+ webrtc::VideoReceiveStreamInterface::Stats GetStats() const override;
+
+  // SetBaseMinimumPlayoutDelayMs and GetBaseMinimumPlayoutDelayMs are called
+  // from the webrtc/api level on behalf of user code, e.g. the blink/js layer
+  // in Chromium.
+ bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override;
+ int GetBaseMinimumPlayoutDelayMs() const override;
+
+ void SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) override;
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) override;
+
+ // Implements rtc::VideoSinkInterface<VideoFrame>.
+ void OnFrame(const VideoFrame& video_frame) override;
+
+ // Implements RtpVideoStreamReceiver2::OnCompleteFrameCallback.
+ void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override;
+
+ // Implements CallStatsObserver::OnRttUpdate
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ // Implements Syncable.
+ uint32_t id() const override;
+ absl::optional<Syncable::Info> GetInfo() const override;
+ bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp,
+ int64_t* time_ms) const override;
+ void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms,
+ int64_t time_ms) override;
+
+ // SetMinimumPlayoutDelay is only called by A/V sync.
+ bool SetMinimumPlayoutDelay(int delay_ms) override;
+
+ std::vector<webrtc::RtpSource> GetSources() const override;
+
+ RecordingState SetAndGetRecordingState(RecordingState state,
+ bool generate_key_frame) override;
+ void GenerateKeyFrame() override;
+
+ private:
+ // FrameSchedulingReceiver implementation.
+ // Called on packet sequence.
+ void OnEncodedFrame(std::unique_ptr<EncodedFrame> frame) override;
+ // Called on packet sequence.
+ void OnDecodableFrameTimeout(TimeDelta wait) override;
+
+ void CreateAndRegisterExternalDecoder(const Decoder& decoder);
+
+ struct DecodeFrameResult {
+ // True if the decoder returned code WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME,
+ // or if the decoder failed and a keyframe is required. When true, a
+ // keyframe request should be sent even if a keyframe request was sent
+ // recently.
+ bool force_request_key_frame;
+
+ // The picture id of the frame that was decoded, or nullopt if the frame was
+ // not decoded.
+ absl::optional<int64_t> decoded_frame_picture_id;
+
+ // True if the next frame decoded must be a keyframe. This value will set
+ // the value of `keyframe_required_`, which will force the frame buffer to
+ // drop all frames that are not keyframes.
+ bool keyframe_required;
+ };
+
+ DecodeFrameResult HandleEncodedFrameOnDecodeQueue(
+ std::unique_ptr<EncodedFrame> frame,
+ bool keyframe_request_is_due,
+ bool keyframe_required) RTC_RUN_ON(decode_queue_);
+ void UpdatePlayoutDelays() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_sequence_checker_);
+ void RequestKeyFrame(Timestamp now) RTC_RUN_ON(packet_sequence_checker_);
+ void HandleKeyFrameGeneration(bool received_frame_is_keyframe,
+ Timestamp now,
+ bool always_request_key_frame,
+ bool keyframe_request_is_due)
+ RTC_RUN_ON(packet_sequence_checker_);
+ bool IsReceivingKeyFrame(Timestamp timestamp) const
+ RTC_RUN_ON(packet_sequence_checker_);
+ int DecodeAndMaybeDispatchEncodedFrame(std::unique_ptr<EncodedFrame> frame)
+ RTC_RUN_ON(decode_queue_);
+
+ void UpdateHistograms();
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_;
+ // TODO(bugs.webrtc.org/11993): This checker conceptually represents
+ // operations that belong to the network thread. The Call class is currently
+ // moving towards handling network packets on the network thread and while
+ // that work is ongoing, this checker may in practice represent the worker
+ // thread, but still serves as a mechanism of grouping together concepts
+ // that belong to the network thread. Once the packets are fully delivered
+ // on the network thread, this comment will be deleted.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
+
+ TaskQueueFactory* const task_queue_factory_;
+
+ TransportAdapter transport_adapter_;
+ const VideoReceiveStreamInterface::Config config_;
+ const int num_cpu_cores_;
+ Call* const call_;
+ Clock* const clock_;
+
+ CallStats* const call_stats_;
+
+ bool decoder_running_ RTC_GUARDED_BY(worker_sequence_checker_) = false;
+ bool decoder_stopped_ RTC_GUARDED_BY(decode_queue_) = true;
+
+ SourceTracker source_tracker_;
+ ReceiveStatisticsProxy stats_proxy_;
+ // Shared by media and rtx stream receivers, since the latter has no RtpRtcp
+ // module of its own.
+ const std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+
+ std::unique_ptr<VCMTiming> timing_; // Jitter buffer experiment.
+ VideoReceiver2 video_receiver_;
+ std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>> incoming_video_stream_;
+ RtpVideoStreamReceiver2 rtp_video_stream_receiver_;
+ std::unique_ptr<VideoStreamDecoder> video_stream_decoder_;
+ RtpStreamsSynchronizer rtp_stream_sync_;
+
+ std::unique_ptr<VideoStreamBufferController> buffer_;
+
+ std::unique_ptr<RtpStreamReceiverInterface> media_receiver_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ std::unique_ptr<RtxReceiveStream> rtx_receive_stream_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+ std::unique_ptr<RtpStreamReceiverInterface> rtx_receiver_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+  // Whenever we are in an undecodable state (the stream has just started or
+  // a decoding error occurred) we require a keyframe to restart the stream.
+ bool keyframe_required_ RTC_GUARDED_BY(packet_sequence_checker_) = true;
+
+ // If we have successfully decoded any frame.
+ bool frame_decoded_ RTC_GUARDED_BY(decode_queue_) = false;
+
+ absl::optional<Timestamp> last_keyframe_request_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+
+ // Keyframe request intervals are configurable through field trials.
+ TimeDelta max_wait_for_keyframe_ RTC_GUARDED_BY(packet_sequence_checker_);
+ TimeDelta max_wait_for_frame_ RTC_GUARDED_BY(packet_sequence_checker_);
+
+  // Each of these tries to change the current min_playout_delay on `timing_`,
+  // but the source of the change request differs in each case. The biggest of
+  // the delays is used; unset (nullopt) means use the default value from
+  // `timing_`.
+ //
+ // Minimum delay as decided by the RTP playout delay extension.
+ absl::optional<TimeDelta> frame_minimum_playout_delay_
+ RTC_GUARDED_BY(worker_sequence_checker_);
+ // Minimum delay as decided by the setLatency function in "webrtc/api".
+ absl::optional<TimeDelta> base_minimum_playout_delay_
+ RTC_GUARDED_BY(worker_sequence_checker_);
+ // Minimum delay as decided by the A/V synchronization feature.
+ absl::optional<TimeDelta> syncable_minimum_playout_delay_
+ RTC_GUARDED_BY(worker_sequence_checker_);
+
+ // Maximum delay as decided by the RTP playout delay extension.
+ absl::optional<TimeDelta> frame_maximum_playout_delay_
+ RTC_GUARDED_BY(worker_sequence_checker_);
+
+ // Function that is triggered with encoded frames, if not empty.
+ std::function<void(const RecordableEncodedFrame&)>
+ encoded_frame_buffer_function_ RTC_GUARDED_BY(decode_queue_);
+  // Set to true while we're requesting keyframes but have not yet received one.
+ bool keyframe_generation_requested_ RTC_GUARDED_BY(packet_sequence_checker_) =
+ false;
+ // Lock to avoid unnecessary per-frame idle wakeups in the code.
+ webrtc::Mutex pending_resolution_mutex_;
+ // Signal from decode queue to OnFrame callback to fill pending_resolution_.
+ // absl::nullopt - no resolution needed. 0x0 - next OnFrame to fill with
+ // received resolution. Not 0x0 - OnFrame has filled a resolution.
+ absl::optional<RecordableEncodedFrame::EncodedResolution> pending_resolution_
+ RTC_GUARDED_BY(pending_resolution_mutex_);
+ // Buffered encoded frames held while waiting for decoded resolution.
+ std::vector<std::unique_ptr<EncodedFrame>> buffered_encoded_frames_
+ RTC_GUARDED_BY(decode_queue_);
+
+ // Defined last so they are destroyed before all other members.
+ rtc::TaskQueue decode_queue_;
+
+ // Used to signal destruction to potentially pending tasks.
+ ScopedTaskSafety task_safety_;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_RECEIVE_STREAM2_H_
diff --git a/third_party/libwebrtc/video/video_receive_stream2_unittest.cc b/third_party/libwebrtc/video/video_receive_stream2_unittest.cc
new file mode 100644
index 0000000000..458944aefa
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream2_unittest.cc
@@ -0,0 +1,1219 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_receive_stream2.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <deque>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <queue>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "absl/types/optional.h"
+#include "api/metronome/test/fake_metronome.h"
+#include "api/test/mock_video_decoder.h"
+#include "api/test/mock_video_decoder_factory.h"
+#include "api/test/time_controller.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/video/encoded_image.h"
+#include "api/video/recordable_encoded_frame.h"
+#include "api/video/test/video_frame_matchers.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "call/video_receive_stream.h"
+#include "common_video/test/utilities.h"
+#include "media/engine/fake_webrtc_call.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "test/fake_decoder.h"
+#include "test/fake_encoded_frame.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "test/video_decoder_proxy_factory.h"
+#include "video/call_stats2.h"
+
+namespace webrtc {
+
+// Printing SdpVideoFormat for gmock argument matchers.
+void PrintTo(const SdpVideoFormat& value, std::ostream* os) {
+ *os << value.ToString();
+}
+
+void PrintTo(const RecordableEncodedFrame::EncodedResolution& value,
+ std::ostream* os) {
+ *os << value.width << "x" << value.height;
+}
+
+void PrintTo(const RecordableEncodedFrame& value, std::ostream* os) {
+ *os << "RecordableEncodedFrame(render_time=" << value.render_time()
+ << " resolution=" << ::testing::PrintToString(value.resolution()) << ")";
+}
+
+} // namespace webrtc
+
+namespace webrtc {
+
+namespace {
+
+using test::video_frame_matchers::NtpTimestamp;
+using test::video_frame_matchers::PacketInfos;
+using test::video_frame_matchers::Rotation;
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::AnyNumber;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::IsEmpty;
+using ::testing::Optional;
+using ::testing::Pointee;
+using ::testing::Property;
+using ::testing::Return;
+using ::testing::SizeIs;
+using ::testing::WithoutArgs;
+
+auto RenderedFrameWith(::testing::Matcher<VideoFrame> m) {
+ return Optional(m);
+}
+auto RenderedFrame() {
+ return RenderedFrameWith(_);
+}
+testing::Matcher<absl::optional<VideoFrame>> DidNotReceiveFrame() {
+ return Eq(absl::nullopt);
+}
+
+constexpr TimeDelta kDefaultTimeOut = TimeDelta::Millis(50);
+constexpr int kDefaultNumCpuCores = 2;
+
+constexpr Timestamp kStartTime = Timestamp::Millis(1'337'000);
+constexpr Frequency k30Fps = Frequency::Hertz(30);
+constexpr TimeDelta k30FpsDelay = 1 / k30Fps;
+constexpr Frequency kRtpTimestampHz = Frequency::KiloHertz(90);
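+// 90 kHz / 30 fps = 3000 RTP ticks between consecutive frames.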
+constexpr uint32_t k30FpsRtpTimestampDelta = kRtpTimestampHz / k30Fps;
+constexpr uint32_t kFirstRtpTimestamp = 90000;
+
+class FakeVideoRenderer : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ explicit FakeVideoRenderer(TimeController* time_controller)
+ : time_controller_(time_controller) {}
+ ~FakeVideoRenderer() override = default;
+
+ void OnFrame(const VideoFrame& frame) override {
+ RTC_LOG(LS_VERBOSE) << "Received frame with timestamp="
+ << frame.timestamp();
+ if (!last_frame_.empty()) {
+ RTC_LOG(LS_INFO) << "Already had frame queue with timestamp="
+ << last_frame_.back().timestamp();
+ }
+ last_frame_.push_back(frame);
+ }
+
+ // If `advance_time`, then the clock will always advance by `timeout`.
+ absl::optional<VideoFrame> WaitForFrame(TimeDelta timeout,
+ bool advance_time = false) {
+ auto start = time_controller_->GetClock()->CurrentTime();
+ if (last_frame_.empty()) {
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ time_controller_->Wait([this] { return !last_frame_.empty(); }, timeout);
+ }
+ absl::optional<VideoFrame> ret;
+ if (!last_frame_.empty()) {
+ ret = last_frame_.front();
+ last_frame_.pop_front();
+ }
+ if (advance_time) {
+ time_controller_->AdvanceTime(
+ timeout - (time_controller_->GetClock()->CurrentTime() - start));
+ }
+ return ret;
+ }
+
+ private:
+ std::deque<VideoFrame> last_frame_;
+ TimeController* const time_controller_;
+};
+
+MATCHER_P2(MatchResolution, w, h, "") {
+ return arg.resolution().width == w && arg.resolution().height == h;
+}
+
+MATCHER_P(RtpTimestamp, timestamp, "") {
+ if (arg.timestamp() != timestamp) {
+ *result_listener->stream()
+ << "rtp timestamp was " << arg.timestamp() << " != " << timestamp;
+ return false;
+ }
+ return true;
+}
+
+// RTP timestamp for an in-order frame at 30 fps.
+uint32_t RtpTimestampForFrame(int id) {
+ return kFirstRtpTimestamp + id * k30FpsRtpTimestampDelta;
+}
+
+// Receive time for an in-order frame at 30 fps.
+Timestamp ReceiveTimeForFrame(int id) {
+ return kStartTime + id * k30FpsDelay;
+}
+
+} // namespace
+
+class VideoReceiveStream2Test : public ::testing::TestWithParam<bool> {
+ public:
+ auto DefaultDecodeAction() {
+ return Invoke(&fake_decoder_, &test::FakeDecoder::Decode);
+ }
+
+ bool UseMetronome() const { return GetParam(); }
+
+ VideoReceiveStream2Test()
+ : time_controller_(kStartTime),
+ clock_(time_controller_.GetClock()),
+ config_(&mock_transport_, &mock_h264_decoder_factory_),
+ call_stats_(clock_, time_controller_.GetMainThread()),
+ fake_renderer_(&time_controller_),
+ fake_metronome_(TimeDelta::Millis(16)),
+ decode_sync_(clock_,
+ &fake_metronome_,
+ time_controller_.GetMainThread()),
+ h264_decoder_factory_(&mock_decoder_) {
+ // By default, mock decoder factory is backed by VideoDecoderProxyFactory.
+ ON_CALL(mock_h264_decoder_factory_, CreateVideoDecoder)
+ .WillByDefault(
+ Invoke(&h264_decoder_factory_,
+ &test::VideoDecoderProxyFactory::CreateVideoDecoder));
+
+    // By default, the mock decoder wraps the fake decoder.
+ ON_CALL(mock_decoder_, Configure)
+ .WillByDefault(Invoke(&fake_decoder_, &test::FakeDecoder::Configure));
+ ON_CALL(mock_decoder_, Decode).WillByDefault(DefaultDecodeAction());
+ ON_CALL(mock_decoder_, RegisterDecodeCompleteCallback)
+ .WillByDefault(
+ Invoke(&fake_decoder_,
+ &test::FakeDecoder::RegisterDecodeCompleteCallback));
+ ON_CALL(mock_decoder_, Release)
+ .WillByDefault(Invoke(&fake_decoder_, &test::FakeDecoder::Release));
+ ON_CALL(mock_transport_, SendRtcp)
+ .WillByDefault(
+ Invoke(&rtcp_packet_parser_, &test::RtcpPacketParser::Parse));
+ }
+
+ ~VideoReceiveStream2Test() override {
+ if (video_receive_stream_) {
+ video_receive_stream_->Stop();
+ video_receive_stream_->UnregisterFromTransport();
+ }
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ void SetUp() override {
+ config_.rtp.remote_ssrc = 1111;
+ config_.rtp.local_ssrc = 2222;
+ config_.renderer = &fake_renderer_;
+ VideoReceiveStreamInterface::Decoder h264_decoder;
+ h264_decoder.payload_type = 99;
+ h264_decoder.video_format = SdpVideoFormat("H264");
+ h264_decoder.video_format.parameters.insert(
+ {"sprop-parameter-sets", "Z0IACpZTBYmI,aMljiA=="});
+ VideoReceiveStreamInterface::Decoder h265_decoder;
+ h265_decoder.payload_type = 100;
+ h265_decoder.video_format = SdpVideoFormat("H265");
+
+ config_.decoders = {h265_decoder, h264_decoder};
+
+ RecreateReceiveStream();
+ }
+
+ void RecreateReceiveStream(
+ absl::optional<VideoReceiveStreamInterface::RecordingState> state =
+ absl::nullopt) {
+ if (video_receive_stream_) {
+ video_receive_stream_->UnregisterFromTransport();
+ video_receive_stream_ = nullptr;
+ }
+ timing_ = new VCMTiming(clock_, fake_call_.trials());
+ video_receive_stream_ =
+ std::make_unique<webrtc::internal::VideoReceiveStream2>(
+ time_controller_.GetTaskQueueFactory(), &fake_call_,
+ kDefaultNumCpuCores, &packet_router_, config_.Copy(), &call_stats_,
+ clock_, absl::WrapUnique(timing_), &nack_periodic_processor_,
+ UseMetronome() ? &decode_sync_ : nullptr, nullptr);
+ video_receive_stream_->RegisterWithTransport(
+ &rtp_stream_receiver_controller_);
+ if (state)
+ video_receive_stream_->SetAndGetRecordingState(std::move(*state), false);
+ }
+
+ protected:
+ GlobalSimulatedTimeController time_controller_;
+ Clock* const clock_;
+ NackPeriodicProcessor nack_periodic_processor_;
+ testing::NiceMock<MockVideoDecoderFactory> mock_h264_decoder_factory_;
+ VideoReceiveStreamInterface::Config config_;
+ internal::CallStats call_stats_;
+ testing::NiceMock<MockVideoDecoder> mock_decoder_;
+ FakeVideoRenderer fake_renderer_;
+ cricket::FakeCall fake_call_;
+ MockTransport mock_transport_;
+ test::RtcpPacketParser rtcp_packet_parser_;
+ PacketRouter packet_router_;
+ RtpStreamReceiverController rtp_stream_receiver_controller_;
+ std::unique_ptr<webrtc::internal::VideoReceiveStream2> video_receive_stream_;
+ VCMTiming* timing_;
+ test::FakeMetronome fake_metronome_;
+ DecodeSynchronizer decode_sync_;
+
+ private:
+ test::VideoDecoderProxyFactory h264_decoder_factory_;
+ test::FakeDecoder fake_decoder_;
+};
+
+TEST_P(VideoReceiveStream2Test, CreateFrameFromH264FmtpSpropAndIdr) {
+ constexpr uint8_t idr_nalu[] = {0x05, 0xFF, 0xFF, 0xFF};
+ RtpPacketToSend rtppacket(nullptr);
+ uint8_t* payload = rtppacket.AllocatePayload(sizeof(idr_nalu));
+ memcpy(payload, idr_nalu, sizeof(idr_nalu));
+ rtppacket.SetMarker(true);
+ rtppacket.SetSsrc(1111);
+ rtppacket.SetPayloadType(99);
+ rtppacket.SetSequenceNumber(1);
+ rtppacket.SetTimestamp(0);
+ EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback(_));
+ video_receive_stream_->Start();
+ EXPECT_CALL(mock_decoder_, Decode(_, false, _));
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
+ rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
+ EXPECT_CALL(mock_decoder_, Release());
+
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(VideoReceiveStream2Test, PlayoutDelay) {
+ const VideoPlayoutDelay kPlayoutDelayMs = {123, 321};
+ std::unique_ptr<test::FakeEncodedFrame> test_frame =
+ test::FakeFrameBuilder().Id(0).AsLast().Build();
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ auto timings = timing_->GetTimings();
+ EXPECT_EQ(kPlayoutDelayMs.min_ms, timings.min_playout_delay.ms());
+ EXPECT_EQ(kPlayoutDelayMs.max_ms, timings.max_playout_delay.ms());
+
+ // Check that the biggest minimum delay is chosen.
+ video_receive_stream_->SetMinimumPlayoutDelay(400);
+ timings = timing_->GetTimings();
+ EXPECT_EQ(400, timings.min_playout_delay.ms());
+
+ // Check base minimum delay validation.
+ EXPECT_FALSE(video_receive_stream_->SetBaseMinimumPlayoutDelayMs(12345));
+ EXPECT_FALSE(video_receive_stream_->SetBaseMinimumPlayoutDelayMs(-1));
+ EXPECT_TRUE(video_receive_stream_->SetBaseMinimumPlayoutDelayMs(500));
+ timings = timing_->GetTimings();
+ EXPECT_EQ(500, timings.min_playout_delay.ms());
+
+  // Check that intermediate values are remembered and that the biggest
+  // remembered value is chosen.
+ video_receive_stream_->SetBaseMinimumPlayoutDelayMs(0);
+ timings = timing_->GetTimings();
+ EXPECT_EQ(400, timings.min_playout_delay.ms());
+
+ video_receive_stream_->SetMinimumPlayoutDelay(0);
+ timings = timing_->GetTimings();
+ EXPECT_EQ(123, timings.min_playout_delay.ms());
+}
+
+TEST_P(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMaxValue) {
+ const TimeDelta default_max_playout_latency =
+ timing_->GetTimings().max_playout_delay;
+ const VideoPlayoutDelay kPlayoutDelayMs = {123, -1};
+
+ std::unique_ptr<test::FakeEncodedFrame> test_frame =
+ test::FakeFrameBuilder().Id(0).AsLast().Build();
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+
+ // Ensure that -1 preserves default maximum value from `timing_`.
+ auto timings = timing_->GetTimings();
+ EXPECT_EQ(kPlayoutDelayMs.min_ms, timings.min_playout_delay.ms());
+ EXPECT_NE(kPlayoutDelayMs.max_ms, timings.max_playout_delay.ms());
+ EXPECT_EQ(default_max_playout_latency, timings.max_playout_delay);
+}
+
+TEST_P(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMinValue) {
+ const TimeDelta default_min_playout_latency =
+ timing_->GetTimings().min_playout_delay;
+ const VideoPlayoutDelay kPlayoutDelayMs = {-1, 321};
+
+ std::unique_ptr<test::FakeEncodedFrame> test_frame =
+ test::FakeFrameBuilder().Id(0).AsLast().Build();
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+
+ // Ensure that -1 preserves default minimum value from `timing_`.
+ auto timings = timing_->GetTimings();
+ EXPECT_NE(kPlayoutDelayMs.min_ms, timings.min_playout_delay.ms());
+ EXPECT_EQ(kPlayoutDelayMs.max_ms, timings.max_playout_delay.ms());
+ EXPECT_EQ(default_min_playout_latency, timings.min_playout_delay);
+}
+
+TEST_P(VideoReceiveStream2Test, RenderParametersSetToDefaultValues) {
+ // Default render parameters.
+ const VideoFrame::RenderParameters kDefaultRenderParameters;
+ // Default with no playout delay set.
+ std::unique_ptr<test::FakeEncodedFrame> test_frame0 =
+ test::FakeFrameBuilder().Id(0).AsLast().Build();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame0));
+ EXPECT_EQ(timing_->RenderParameters(), kDefaultRenderParameters);
+}
+
+TEST_P(VideoReceiveStream2Test, UseLowLatencyRenderingSetFromPlayoutDelay) {
+ // use_low_latency_rendering set if playout delay set to min=0, max<=500 ms.
+ std::unique_ptr<test::FakeEncodedFrame> test_frame0 =
+ test::FakeFrameBuilder().Id(0).AsLast().Build();
+ test_frame0->SetPlayoutDelay({/*min_ms=*/0, /*max_ms=*/0});
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame0));
+ EXPECT_TRUE(timing_->RenderParameters().use_low_latency_rendering);
+
+ std::unique_ptr<test::FakeEncodedFrame> test_frame1 =
+ test::FakeFrameBuilder().Id(1).AsLast().Build();
+ test_frame1->SetPlayoutDelay({/*min_ms=*/0, /*max_ms=*/500});
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame1));
+ EXPECT_TRUE(timing_->RenderParameters().use_low_latency_rendering);
+}
+
+TEST_P(VideoReceiveStream2Test, MaxCompositionDelaySetFromMaxPlayoutDelay) {
+ // The max composition delay is dependent on the number of frames in the
+ // pre-decode queue. It's therefore important to advance the time as the test
+ // runs to get the correct expectations of max_composition_delay_in_frames.
+ video_receive_stream_->Start();
+ // Max composition delay not set if no playout delay is set.
+ std::unique_ptr<test::FakeEncodedFrame> test_frame0 =
+ test::FakeFrameBuilder()
+ .Id(0)
+ .Time(RtpTimestampForFrame(0))
+ .ReceivedTime(ReceiveTimeForFrame(0))
+ .AsLast()
+ .Build();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame0));
+ EXPECT_THAT(timing_->RenderParameters().max_composition_delay_in_frames,
+ Eq(absl::nullopt));
+ time_controller_.AdvanceTime(k30FpsDelay);
+
+ // Max composition delay not set for playout delay 0,0.
+ std::unique_ptr<test::FakeEncodedFrame> test_frame1 =
+ test::FakeFrameBuilder()
+ .Id(1)
+ .Time(RtpTimestampForFrame(1))
+ .ReceivedTime(ReceiveTimeForFrame(1))
+ .AsLast()
+ .Build();
+ test_frame1->SetPlayoutDelay({0, 0});
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame1));
+ EXPECT_THAT(timing_->RenderParameters().max_composition_delay_in_frames,
+ Eq(absl::nullopt));
+ time_controller_.AdvanceTime(k30FpsDelay);
+
+ // Max composition delay not set for playout delay X,Y, where X,Y>0.
+ std::unique_ptr<test::FakeEncodedFrame> test_frame2 =
+ test::FakeFrameBuilder()
+ .Id(2)
+ .Time(RtpTimestampForFrame(2))
+ .ReceivedTime(ReceiveTimeForFrame(2))
+ .AsLast()
+ .Build();
+ test_frame2->SetPlayoutDelay({10, 30});
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame2));
+ EXPECT_THAT(timing_->RenderParameters().max_composition_delay_in_frames,
+ Eq(absl::nullopt));
+
+ time_controller_.AdvanceTime(k30FpsDelay);
+
+ // Max composition delay set if playout delay X,Y, where X=0,Y>0.
+ const int kExpectedMaxCompositionDelayInFrames = 3; // ~50 ms at 60 fps.
+ std::unique_ptr<test::FakeEncodedFrame> test_frame3 =
+ test::FakeFrameBuilder()
+ .Id(3)
+ .Time(RtpTimestampForFrame(3))
+ .ReceivedTime(ReceiveTimeForFrame(3))
+ .AsLast()
+ .Build();
+ test_frame3->SetPlayoutDelay({0, 50});
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame3));
+ EXPECT_THAT(timing_->RenderParameters().max_composition_delay_in_frames,
+ Optional(kExpectedMaxCompositionDelayInFrames));
+}
+
+TEST_P(VideoReceiveStream2Test, LazyDecoderCreation) {
+ constexpr uint8_t idr_nalu[] = {0x05, 0xFF, 0xFF, 0xFF};
+ RtpPacketToSend rtppacket(nullptr);
+ uint8_t* payload = rtppacket.AllocatePayload(sizeof(idr_nalu));
+ memcpy(payload, idr_nalu, sizeof(idr_nalu));
+ rtppacket.SetMarker(true);
+ rtppacket.SetSsrc(1111);
+  // H264 payload type (99), per the decoder config in SetUp().
+ rtppacket.SetPayloadType(99);
+ rtppacket.SetSequenceNumber(1);
+ rtppacket.SetTimestamp(0);
+
+ // No decoders are created by default.
+ EXPECT_CALL(mock_h264_decoder_factory_, CreateVideoDecoder(_)).Times(0);
+ video_receive_stream_->Start();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ EXPECT_TRUE(
+ testing::Mock::VerifyAndClearExpectations(&mock_h264_decoder_factory_));
+  // Verify that the decoder is created when we receive payload data and a
+  // frame decode is attempted.
+ EXPECT_CALL(
+ mock_h264_decoder_factory_,
+ CreateVideoDecoder(Field(&SdpVideoFormat::name, testing::Eq("H264"))));
+ EXPECT_CALL(mock_decoder_, Configure);
+ EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback);
+ EXPECT_CALL(mock_decoder_, Decode);
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
+ rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
+ EXPECT_CALL(mock_decoder_, Release);
+
+ // Make sure the decoder thread had a chance to run.
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(VideoReceiveStream2Test, PassesNtpTime) {
+ const Timestamp kNtpTimestamp = Timestamp::Millis(12345);
+ std::unique_ptr<test::FakeEncodedFrame> test_frame =
+ test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .NtpTime(kNtpTimestamp)
+ .AsLast()
+ .Build();
+
+ video_receive_stream_->Start();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut),
+ RenderedFrameWith(NtpTimestamp(kNtpTimestamp)));
+}
+
+TEST_P(VideoReceiveStream2Test, PassesRotation) {
+ const webrtc::VideoRotation kRotation = webrtc::kVideoRotation_180;
+ std::unique_ptr<test::FakeEncodedFrame> test_frame = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Rotation(kRotation)
+ .AsLast()
+ .Build();
+
+ video_receive_stream_->Start();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut),
+ RenderedFrameWith(Rotation(kRotation)));
+}
+
+TEST_P(VideoReceiveStream2Test, PassesPacketInfos) {
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ auto test_frame = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .PacketInfos(packet_infos)
+ .AsLast()
+ .Build();
+
+ video_receive_stream_->Start();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut),
+ RenderedFrameWith(PacketInfos(ElementsAreArray(packet_infos))));
+}
+
+TEST_P(VideoReceiveStream2Test, RenderedFrameUpdatesGetSources) {
+ constexpr uint32_t kSsrc = 1111;
+ constexpr uint32_t kCsrc = 9001;
+ constexpr uint32_t kRtpTimestamp = 12345;
+
+ // Prepare one video frame with per-packet information.
+ auto test_frame =
+ test::FakeFrameBuilder().Id(0).PayloadType(99).AsLast().Build();
+ RtpPacketInfos packet_infos;
+ {
+ RtpPacketInfos::vector_type infos;
+
+ RtpPacketInfo info;
+ info.set_ssrc(kSsrc);
+ info.set_csrcs({kCsrc});
+ info.set_rtp_timestamp(kRtpTimestamp);
+
+ info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(5000));
+ infos.push_back(info);
+
+ info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(3000));
+ infos.push_back(info);
+
+ info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(2000));
+ infos.push_back(info);
+
+ info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(1000));
+ infos.push_back(info);
+
+ packet_infos = RtpPacketInfos(std::move(infos));
+ }
+ test_frame->SetPacketInfos(packet_infos);
+
+ // Start receive stream.
+ video_receive_stream_->Start();
+ EXPECT_THAT(video_receive_stream_->GetSources(), IsEmpty());
+
+ // Render one video frame.
+ int64_t timestamp_ms_min = clock_->TimeInMilliseconds();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ // Verify that the per-packet information is passed to the renderer.
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut),
+ RenderedFrameWith(PacketInfos(ElementsAreArray(packet_infos))));
+ int64_t timestamp_ms_max = clock_->TimeInMilliseconds();
+
+ // Verify that the per-packet information also updates `GetSources()`.
+ std::vector<RtpSource> sources = video_receive_stream_->GetSources();
+ ASSERT_THAT(sources, SizeIs(2));
+ {
+ auto it = std::find_if(sources.begin(), sources.end(),
+ [](const RtpSource& source) {
+ return source.source_type() == RtpSourceType::SSRC;
+ });
+ ASSERT_NE(it, sources.end());
+
+ EXPECT_EQ(it->source_id(), kSsrc);
+ EXPECT_EQ(it->source_type(), RtpSourceType::SSRC);
+ EXPECT_EQ(it->rtp_timestamp(), kRtpTimestamp);
+ EXPECT_GE(it->timestamp_ms(), timestamp_ms_min);
+ EXPECT_LE(it->timestamp_ms(), timestamp_ms_max);
+ }
+ {
+ auto it = std::find_if(sources.begin(), sources.end(),
+ [](const RtpSource& source) {
+ return source.source_type() == RtpSourceType::CSRC;
+ });
+ ASSERT_NE(it, sources.end());
+
+ EXPECT_EQ(it->source_id(), kCsrc);
+ EXPECT_EQ(it->source_type(), RtpSourceType::CSRC);
+ EXPECT_EQ(it->rtp_timestamp(), kRtpTimestamp);
+ EXPECT_GE(it->timestamp_ms(), timestamp_ms_min);
+ EXPECT_LE(it->timestamp_ms(), timestamp_ms_max);
+ }
+}
+
+std::unique_ptr<test::FakeEncodedFrame> MakeFrameWithResolution(
+ VideoFrameType frame_type,
+ int picture_id,
+ int width,
+ int height) {
+ auto frame =
+ test::FakeFrameBuilder().Id(picture_id).PayloadType(99).AsLast().Build();
+ frame->SetFrameType(frame_type);
+ frame->_encodedWidth = width;
+ frame->_encodedHeight = height;
+ return frame;
+}
+
+std::unique_ptr<test::FakeEncodedFrame> MakeFrame(VideoFrameType frame_type,
+ int picture_id) {
+ return MakeFrameWithResolution(frame_type, picture_id, 320, 240);
+}
+
+TEST_P(VideoReceiveStream2Test, PassesFrameWhenEncodedFramesCallbackSet) {
+ testing::MockFunction<void(const RecordableEncodedFrame&)> callback;
+ video_receive_stream_->Start();
+ EXPECT_CALL(callback, Call);
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStreamInterface::RecordingState(callback.AsStdFunction()),
+ true);
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameKey, 0));
+ EXPECT_TRUE(fake_renderer_.WaitForFrame(kDefaultTimeOut));
+
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, MovesEncodedFrameDispatchStateWhenReCreating) {
+ testing::MockFunction<void(const RecordableEncodedFrame&)> callback;
+ video_receive_stream_->Start();
+ // Expect a key frame request over RTCP.
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStreamInterface::RecordingState(callback.AsStdFunction()),
+ true);
+ video_receive_stream_->Stop();
+ VideoReceiveStreamInterface::RecordingState old_state =
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStreamInterface::RecordingState(), false);
+ RecreateReceiveStream(std::move(old_state));
+
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, RequestsKeyFramesUntilKeyFrameReceived) {
+ // Recreate receive stream with shorter delay to test rtx.
+ TimeDelta rtx_delay = TimeDelta::Millis(50);
+ config_.rtp.nack.rtp_history_ms = rtx_delay.ms();
+ auto tick = rtx_delay / 2;
+ RecreateReceiveStream();
+ video_receive_stream_->Start();
+
+ video_receive_stream_->GenerateKeyFrame();
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameDelta, 0));
+ fake_renderer_.WaitForFrame(kDefaultTimeOut);
+ time_controller_.AdvanceTime(tick);
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameDelta, 1));
+ fake_renderer_.WaitForFrame(kDefaultTimeOut);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ testing::Mock::VerifyAndClearExpectations(&mock_transport_);
+
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
+
+ // T+keyframetimeout: still no key frame received, expect key frame request
+ // sent again.
+ time_controller_.AdvanceTime(tick);
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameDelta, 2));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+ testing::Mock::VerifyAndClearExpectations(&mock_transport_);
+
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(2));
+
+ // T+keyframetimeout: now send a key frame - we should not observe new key
+ // frame requests after this.
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameKey, 3));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+ time_controller_.AdvanceTime(2 * tick);
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameDelta, 4));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+
+ EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(2));
+}
+
+TEST_P(VideoReceiveStream2Test,
+ DispatchesEncodedFrameSequenceStartingWithKeyframeWithoutResolution) {
+ video_receive_stream_->Start();
+ testing::MockFunction<void(const RecordableEncodedFrame&)> callback;
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStreamInterface::RecordingState(callback.AsStdFunction()),
+ /*generate_key_frame=*/false);
+
+ InSequence s;
+ EXPECT_CALL(callback,
+ Call(MatchResolution(test::FakeDecoder::kDefaultWidth,
+ test::FakeDecoder::kDefaultHeight)));
+ EXPECT_CALL(callback, Call);
+
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrameWithResolution(VideoFrameType::kVideoFrameKey, 0, 0, 0));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrameWithResolution(VideoFrameType::kVideoFrameDelta, 1, 0, 0));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test,
+ DispatchesEncodedFrameSequenceStartingWithKeyframeWithResolution) {
+ video_receive_stream_->Start();
+ testing::MockFunction<void(const RecordableEncodedFrame&)> callback;
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStreamInterface::RecordingState(callback.AsStdFunction()),
+ /*generate_key_frame=*/false);
+
+ InSequence s;
+ EXPECT_CALL(callback, Call(MatchResolution(1080u, 720u)));
+ EXPECT_CALL(callback, Call);
+
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrameWithResolution(VideoFrameType::kVideoFrameKey, 0, 1080, 720));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrameWithResolution(VideoFrameType::kVideoFrameDelta, 1, 0, 0));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(kDefaultTimeOut), RenderedFrame());
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, DependantFramesAreScheduled) {
+ video_receive_stream_->Start();
+
+ auto key_frame = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(kFirstRtpTimestamp)
+ .ReceivedTime(kStartTime)
+ .AsLast()
+ .Build();
+ auto delta_frame = test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(1))
+ .ReceivedTime(ReceiveTimeForFrame(1))
+ .Refs({0})
+ .AsLast()
+ .Build();
+
+ // Expect frames are decoded in order.
+ InSequence seq;
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _));
+ EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp +
+ k30FpsRtpTimestampDelta),
+ _, _))
+ .Times(1);
+ video_receive_stream_->OnCompleteFrame(std::move(key_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
+
+ time_controller_.AdvanceTime(k30FpsDelay);
+ video_receive_stream_->OnCompleteFrame(std::move(delta_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), RenderedFrame());
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, FramesScheduledInOrder) {
+ video_receive_stream_->Start();
+
+ auto key_frame = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(kFirstRtpTimestamp)
+ .AsLast()
+ .Build();
+ auto delta_frame1 = test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(1))
+ .Refs({0})
+ .AsLast()
+ .Build();
+ auto delta_frame2 = test::FakeFrameBuilder()
+ .Id(2)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(2))
+ .Refs({1})
+ .AsLast()
+ .Build();
+
+ // Expect frames are decoded in order despite delta_frame1 arriving first.
+ InSequence seq;
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
+ .Times(1);
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _, _))
+ .Times(1);
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _))
+ .Times(1);
+ key_frame->SetReceivedTime(clock_->CurrentTime().ms());
+ video_receive_stream_->OnCompleteFrame(std::move(key_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
+
+ delta_frame2->SetReceivedTime(clock_->CurrentTime().ms());
+ video_receive_stream_->OnCompleteFrame(std::move(delta_frame2));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), DidNotReceiveFrame());
+ // `delta_frame1` arrives late.
+ delta_frame1->SetReceivedTime(clock_->CurrentTime().ms());
+ video_receive_stream_->OnCompleteFrame(std::move(delta_frame1));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), RenderedFrame());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay * 2), RenderedFrame());
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, WaitsforAllSpatialLayers) {
+ video_receive_stream_->Start();
+ auto sl0 = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(kFirstRtpTimestamp)
+ .ReceivedTime(kStartTime)
+ .Build();
+ auto sl1 = test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .ReceivedTime(kStartTime)
+ .Time(kFirstRtpTimestamp)
+ .Refs({0})
+ .Build();
+ auto sl2 = test::FakeFrameBuilder()
+ .Id(2)
+ .PayloadType(99)
+ .ReceivedTime(kStartTime)
+ .Time(kFirstRtpTimestamp)
+ .Refs({0, 1})
+ .AsLast()
+ .Build();
+
+ // No decodes should be called until `sl2` is received.
+ EXPECT_CALL(mock_decoder_, Decode).Times(0);
+ sl0->SetReceivedTime(clock_->CurrentTime().ms());
+ video_receive_stream_->OnCompleteFrame(std::move(sl0));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
+ DidNotReceiveFrame());
+ video_receive_stream_->OnCompleteFrame(std::move(sl1));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
+ DidNotReceiveFrame());
+ // When `sl2` arrives decode should happen.
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
+ .Times(1);
+ video_receive_stream_->OnCompleteFrame(std::move(sl2));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, FramesFastForwardOnSystemHalt) {
+ video_receive_stream_->Start();
+
+ // The frame structure looks like this,
+ // F1
+ // /
+ // F0 --> F2
+ //
+ // In this case we will have a system halt simulated. By the time the system
+ // resumes, F1 will be old and so F2 should be decoded.
+ auto key_frame = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(kFirstRtpTimestamp)
+ .AsLast()
+ .Build();
+ auto ffwd_frame = test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(1))
+ .Refs({0})
+ .AsLast()
+ .Build();
+ auto rendered_frame = test::FakeFrameBuilder()
+ .Id(2)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(2))
+ .Refs({0})
+ .AsLast()
+ .Build();
+ InSequence seq;
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
+ .WillOnce(testing::DoAll(Invoke([&] {
+ // System halt will be simulated in the decode.
+ time_controller_.AdvanceTime(k30FpsDelay * 2);
+ }),
+ DefaultDecodeAction()));
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _));
+ video_receive_stream_->OnCompleteFrame(std::move(key_frame));
+ video_receive_stream_->OnCompleteFrame(std::move(ffwd_frame));
+ video_receive_stream_->OnCompleteFrame(std::move(rendered_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
+ RenderedFrameWith(RtpTimestamp(RtpTimestampForFrame(0))));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
+ RenderedFrameWith(RtpTimestamp(RtpTimestampForFrame(2))));
+
+  // Check that stats report the correct number of dropped frames.
+ auto stats = video_receive_stream_->GetStats();
+ EXPECT_EQ(stats.frames_dropped, 1u);
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, BetterFrameInsertedWhileWaitingToDecodeFrame) {
+ video_receive_stream_->Start();
+
+ auto key_frame = test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(kFirstRtpTimestamp)
+ .ReceivedTime(ReceiveTimeForFrame(0))
+ .AsLast()
+ .Build();
+ auto f1 = test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(1))
+ .ReceivedTime(ReceiveTimeForFrame(1))
+ .Refs({0})
+ .AsLast()
+ .Build();
+ auto f2 = test::FakeFrameBuilder()
+ .Id(2)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(2))
+ .ReceivedTime(ReceiveTimeForFrame(2))
+ .Refs({0})
+ .AsLast()
+ .Build();
+
+ video_receive_stream_->OnCompleteFrame(std::move(key_frame));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
+
+ InSequence seq;
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _, _))
+ .Times(1);
+ EXPECT_CALL(mock_decoder_,
+ Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _))
+ .Times(1);
+ // Simulate f1 arriving after f2 but before f2 is decoded.
+ video_receive_stream_->OnCompleteFrame(std::move(f2));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), DidNotReceiveFrame());
+ video_receive_stream_->OnCompleteFrame(std::move(f1));
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), RenderedFrame());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), RenderedFrame());
+
+ video_receive_stream_->Stop();
+}
+
+// Note: This test takes a long time (~10s) to run if the fake metronome is
+// active. Since the test needs to wait for the timestamp to roll over, it has
+// a fake delay of around 6.5 hours. Even though time is simulated, this
+// amounts to around 1,500,000 metronome tick invocations.
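+// (Rough arithmetic: half the 32-bit timestamp range at the 90 kHz RTP clock
+// is 2^31 / 90000 ~= 23861 s ~= 6.6 hours; at an assumed 16 ms metronome
+// tick, that is roughly 1.5 million invocations.)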
+TEST_P(VideoReceiveStream2Test, RtpTimestampWrapAround) {
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(AnyNumber());
+ video_receive_stream_->Start();
+
+ constexpr uint32_t kBaseRtp = std::numeric_limits<uint32_t>::max() / 2;
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(kBaseRtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
+ time_controller_.AdvanceTime(k30FpsDelay);
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .Time(kBaseRtp + k30FpsRtpTimestampDelta)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay), RenderedFrame());
+
+ // Pause stream so that RTP timestamp wraps around.
+ constexpr uint32_t kLastRtp = kBaseRtp + k30FpsRtpTimestampDelta;
+ constexpr uint32_t kWrapAroundRtp =
+ kLastRtp + std::numeric_limits<uint32_t>::max() / 2 + 1;
+  // Pause for the corresponding delay such that, in real time, the RTP
+  // timestamp advances by this much.
+ constexpr TimeDelta kWrapAroundDelay =
+ (std::numeric_limits<uint32_t>::max() / 2 + 1) / kRtpTimestampHz;
+
+ time_controller_.AdvanceTime(kWrapAroundDelay);
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(2)
+ .PayloadType(99)
+ .Time(kWrapAroundRtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kWrapAroundRtp), _, _))
+ .Times(1);
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
+
+ video_receive_stream_->Stop();
+}
+
+// If a frame is lost, making the stream temporarily non-decodable, and the
+// sender reduces its framerate during this time, the video stream should
+// resume decoding at the new framerate. However, if the connection is poor, a
+// recovery keyframe will take a long time to send. If the timing of the
+// incoming frames was not kept up to date with the new framerate while the
+// stream was non-decodable, this late keyframe will be scheduled with a large
+// delay, since its RTP timestamp makes it look as if the frame arrived early.
+TEST_P(VideoReceiveStream2Test, PoorConnectionWithFpsChangeDuringLostFrame) {
+ video_receive_stream_->Start();
+
+ constexpr Frequency k15Fps = Frequency::Hertz(15);
+ constexpr TimeDelta k15FpsDelay = 1 / k15Fps;
+ constexpr uint32_t k15FpsRtpTimestampDelta = kRtpTimestampHz / k15Fps;
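+  // Assuming the usual 90 kHz video RTP clock for kRtpTimestampHz, this is
+  // 90000 / 15 = 6000 ticks per frame.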
+
+ // Initial keyframe and frames at 30fps.
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(0))
+ .ReceivedTime(ReceiveTimeForFrame(0))
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay, /*advance_time=*/true),
+ RenderedFrameWith(RtpTimestamp(RtpTimestampForFrame(0))));
+
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(1)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(1))
+ .ReceivedTime(ReceiveTimeForFrame(1))
+ .Refs({0})
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay, /*advance_time=*/true),
+ RenderedFrameWith(RtpTimestamp(RtpTimestampForFrame(1))));
+
+  // Simulate lost frame 2, followed by 2 seconds of frames at 30fps, followed
+  // by 2 seconds of frames at 15fps, and then a keyframe.
+ time_controller_.AdvanceTime(k30FpsDelay);
+
+ Timestamp send_30fps_end_time = clock_->CurrentTime() + TimeDelta::Seconds(2);
+ int id = 3;
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(AnyNumber());
+ while (clock_->CurrentTime() < send_30fps_end_time) {
+ ++id;
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(id)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(id))
+ .ReceivedTime(ReceiveTimeForFrame(id))
+ .Refs({id - 1})
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay, /*advance_time=*/true),
+ Eq(absl::nullopt));
+ }
+ uint32_t current_rtp = RtpTimestampForFrame(id);
+ Timestamp send_15fps_end_time = clock_->CurrentTime() + TimeDelta::Seconds(2);
+ while (clock_->CurrentTime() < send_15fps_end_time) {
+ ++id;
+ current_rtp += k15FpsRtpTimestampDelta;
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(id)
+ .PayloadType(99)
+ .Time(current_rtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .Refs({id - 1})
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k15FpsDelay, /*advance_time=*/true),
+ Eq(absl::nullopt));
+ }
+
+ ++id;
+ current_rtp += k15FpsRtpTimestampDelta;
+  // Insert a keyframe, which will recover the stream. However, on a poor
+  // connection the keyframe will take significant time to send.
+ constexpr TimeDelta kKeyframeDelay = TimeDelta::Millis(200);
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(id)
+ .PayloadType(99)
+ .Time(current_rtp)
+ .ReceivedTime(clock_->CurrentTime() + kKeyframeDelay)
+ .AsLast()
+ .Build());
+  // If the framerate was not updated to 15fps based on the frames that
+  // arrived while the stream was non-decodable, this will fail, as the delay
+  // will be longer.
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k15FpsDelay, /*advance_time=*/true),
+ RenderedFrameWith(RtpTimestamp(current_rtp)));
+
+ video_receive_stream_->Stop();
+}
+
+TEST_P(VideoReceiveStream2Test, StreamShouldNotTimeoutWhileWaitingForFrame) {
+ // Disable smoothing since this makes it hard to test frame timing.
+ config_.enable_prerenderer_smoothing = false;
+ RecreateReceiveStream();
+
+ video_receive_stream_->Start();
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(AnyNumber());
+
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(0)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(0))
+ .ReceivedTime(ReceiveTimeForFrame(0))
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay, /*advance_time=*/true),
+ RenderedFrameWith(RtpTimestamp(RtpTimestampForFrame(0))));
+
+ for (int id = 1; id < 30; ++id) {
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(id)
+ .PayloadType(99)
+ .Time(RtpTimestampForFrame(id))
+ .ReceivedTime(ReceiveTimeForFrame(id))
+ .Refs({0})
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(k30FpsDelay, /*advance_time=*/true),
+ RenderedFrameWith(RtpTimestamp(RtpTimestampForFrame(id))));
+ }
+
+  // Simulate a pause in the stream, followed by a decodable frame whose
+  // render time is far in the future. The stream should not time out in this
+  // case, but rather decode the frame just before the timeout.
+ time_controller_.AdvanceTime(TimeDelta::Millis(2900));
+ uint32_t late_decode_rtp = kFirstRtpTimestamp + 200 * k30FpsRtpTimestampDelta;
+ video_receive_stream_->OnCompleteFrame(
+ test::FakeFrameBuilder()
+ .Id(121)
+ .PayloadType(99)
+ .Time(late_decode_rtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Millis(100),
+ /*advance_time=*/true),
+ RenderedFrameWith(RtpTimestamp(late_decode_rtp)));
+
+ video_receive_stream_->Stop();
+}
+
+INSTANTIATE_TEST_SUITE_P(VideoReceiveStream2Test,
+ VideoReceiveStream2Test,
+ testing::Bool(),
+ [](const auto& test_param_info) {
+ return (test_param_info.param
+ ? "ScheduleDecodesWithMetronome"
+ : "ScheduleDecodesWithPostTask");
+ });
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.cc b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.cc
new file mode 100644
index 0000000000..0409f26560
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_receive_stream_timeout_tracker.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+VideoReceiveStreamTimeoutTracker::VideoReceiveStreamTimeoutTracker(
+ Clock* clock,
+ TaskQueueBase* const bookkeeping_queue,
+ const Timeouts& timeouts,
+ TimeoutCallback callback)
+ : clock_(clock),
+ bookkeeping_queue_(bookkeeping_queue),
+ timeouts_(timeouts),
+ timeout_cb_(std::move(callback)) {}
+
+VideoReceiveStreamTimeoutTracker::~VideoReceiveStreamTimeoutTracker() {
+ RTC_DCHECK(!timeout_task_.Running());
+}
+
+bool VideoReceiveStreamTimeoutTracker::Running() const {
+ return timeout_task_.Running();
+}
+
+TimeDelta VideoReceiveStreamTimeoutTracker::TimeUntilTimeout() const {
+ return std::max(timeout_ - clock_->CurrentTime(), TimeDelta::Zero());
+}
+
+void VideoReceiveStreamTimeoutTracker::Start(bool waiting_for_keyframe) {
+ RTC_DCHECK_RUN_ON(bookkeeping_queue_);
+ RTC_DCHECK(!timeout_task_.Running());
+ waiting_for_keyframe_ = waiting_for_keyframe;
+ TimeDelta timeout_delay = TimeoutForNextFrame();
+ last_frame_ = clock_->CurrentTime();
+ timeout_ = last_frame_ + timeout_delay;
+ timeout_task_ =
+ RepeatingTaskHandle::DelayedStart(bookkeeping_queue_, timeout_delay,
+ [this] { return HandleTimeoutTask(); });
+}
+
+void VideoReceiveStreamTimeoutTracker::Stop() {
+ timeout_task_.Stop();
+}
+
+void VideoReceiveStreamTimeoutTracker::SetWaitingForKeyframe() {
+ RTC_DCHECK_RUN_ON(bookkeeping_queue_);
+ waiting_for_keyframe_ = true;
+ TimeDelta timeout_delay = TimeoutForNextFrame();
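+  // Restart the timer only if the keyframe timeout would fire sooner than the
+  // currently scheduled timeout; otherwise the existing deadline is kept.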
+ if (clock_->CurrentTime() + timeout_delay < timeout_) {
+ Stop();
+ Start(waiting_for_keyframe_);
+ }
+}
+
+void VideoReceiveStreamTimeoutTracker::OnEncodedFrameReleased() {
+ RTC_DCHECK_RUN_ON(bookkeeping_queue_);
+ // If we were waiting for a keyframe, then it has just been released.
+ waiting_for_keyframe_ = false;
+ last_frame_ = clock_->CurrentTime();
+ timeout_ = last_frame_ + TimeoutForNextFrame();
+}
+
+TimeDelta VideoReceiveStreamTimeoutTracker::HandleTimeoutTask() {
+ RTC_DCHECK_RUN_ON(bookkeeping_queue_);
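+  // The TimeDelta returned here tells the RepeatingTaskHandle started in
+  // Start() how long to wait before running this task again.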
+ Timestamp now = clock_->CurrentTime();
+  // `timeout_` has been reached and we have timed out. Invoke the callback
+  // and schedule the next timeout one full timeout delay from now.
+ if (now >= timeout_) {
+ RTC_DLOG(LS_VERBOSE) << "Stream timeout at " << now;
+ TimeDelta timeout_delay = TimeoutForNextFrame();
+ timeout_ = now + timeout_delay;
+ timeout_cb_(now - last_frame_);
+ return timeout_delay;
+ }
+ // Otherwise, `timeout_` changed since we scheduled a timeout. Reschedule
+ // a timeout check.
+ return timeout_ - now;
+}
+
+void VideoReceiveStreamTimeoutTracker::SetTimeouts(Timeouts timeouts) {
+ RTC_DCHECK_RUN_ON(bookkeeping_queue_);
+ timeouts_ = timeouts;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.h b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.h
new file mode 100644
index 0000000000..c15aa70e92
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_RECEIVE_STREAM_TIMEOUT_TRACKER_H_
+#define VIDEO_VIDEO_RECEIVE_STREAM_TIMEOUT_TRACKER_H_
+
+#include <functional>
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class VideoReceiveStreamTimeoutTracker {
+ public:
+ struct Timeouts {
+ TimeDelta max_wait_for_keyframe;
+ TimeDelta max_wait_for_frame;
+ };
+
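+  // Invoked on the bookkeeping queue when a timeout fires, with the time
+  // elapsed since the last released frame.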
+ using TimeoutCallback = std::function<void(TimeDelta wait)>;
+ VideoReceiveStreamTimeoutTracker(Clock* clock,
+ TaskQueueBase* const bookkeeping_queue,
+ const Timeouts& timeouts,
+ TimeoutCallback callback);
+ ~VideoReceiveStreamTimeoutTracker();
+ VideoReceiveStreamTimeoutTracker(const VideoReceiveStreamTimeoutTracker&) =
+ delete;
+ VideoReceiveStreamTimeoutTracker& operator=(
+ const VideoReceiveStreamTimeoutTracker&) = delete;
+
+ bool Running() const;
+ void Start(bool waiting_for_keyframe);
+ void Stop();
+ void SetWaitingForKeyframe();
+ void OnEncodedFrameReleased();
+ TimeDelta TimeUntilTimeout() const;
+
+ void SetTimeouts(Timeouts timeouts);
+
+ private:
+ TimeDelta TimeoutForNextFrame() const RTC_RUN_ON(bookkeeping_queue_) {
+ return waiting_for_keyframe_ ? timeouts_.max_wait_for_keyframe
+ : timeouts_.max_wait_for_frame;
+ }
+ TimeDelta HandleTimeoutTask();
+
+ Clock* const clock_;
+ TaskQueueBase* const bookkeeping_queue_;
+ Timeouts timeouts_ RTC_GUARDED_BY(bookkeeping_queue_);
+ const TimeoutCallback timeout_cb_;
+ RepeatingTaskHandle timeout_task_;
+
+ Timestamp last_frame_ = Timestamp::MinusInfinity();
+ Timestamp timeout_ = Timestamp::MinusInfinity();
+ bool waiting_for_keyframe_;
+};
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_RECEIVE_STREAM_TIMEOUT_TRACKER_H_
diff --git a/third_party/libwebrtc/video/video_receive_stream_timeout_tracker_gn/moz.build b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker_gn/moz.build
new file mode 100644
index 0000000000..0de224d0df
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/video_receive_stream_timeout_tracker.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_receive_stream_timeout_tracker_gn")
diff --git a/third_party/libwebrtc/video/video_receive_stream_timeout_tracker_unittest.cc b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker_unittest.cc
new file mode 100644
index 0000000000..ea3bf896b8
--- /dev/null
+++ b/third_party/libwebrtc/video/video_receive_stream_timeout_tracker_unittest.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_receive_stream_timeout_tracker.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/task_queue/task_queue_base.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr auto kMaxWaitForKeyframe = TimeDelta::Millis(500);
+constexpr auto kMaxWaitForFrame = TimeDelta::Millis(1500);
+constexpr VideoReceiveStreamTimeoutTracker::Timeouts config = {
+ kMaxWaitForKeyframe, kMaxWaitForFrame};
+} // namespace
+
+class VideoReceiveStreamTimeoutTrackerTest : public ::testing::Test {
+ public:
+ VideoReceiveStreamTimeoutTrackerTest()
+ : time_controller_(Timestamp::Millis(2000)),
+ timeout_tracker_(time_controller_.GetClock(),
+ time_controller_.GetMainThread(),
+ config,
+ [this](TimeDelta delay) { OnTimeout(delay); }) {}
+
+ protected:
+ GlobalSimulatedTimeController time_controller_;
+ VideoReceiveStreamTimeoutTracker timeout_tracker_;
+ std::vector<TimeDelta> timeouts_;
+
+ private:
+ void OnTimeout(TimeDelta delay) { timeouts_.push_back(delay); }
+};
+
+TEST_F(VideoReceiveStreamTimeoutTrackerTest, TimeoutAfterInitialPeriod) {
+ timeout_tracker_.Start(true);
+ time_controller_.AdvanceTime(kMaxWaitForKeyframe);
+ EXPECT_THAT(timeouts_, testing::ElementsAre(kMaxWaitForKeyframe));
+ timeout_tracker_.Stop();
+}
+
+TEST_F(VideoReceiveStreamTimeoutTrackerTest, NoTimeoutAfterStop) {
+ timeout_tracker_.Start(true);
+ time_controller_.AdvanceTime(kMaxWaitForKeyframe / 2);
+ timeout_tracker_.Stop();
+ time_controller_.AdvanceTime(kMaxWaitForKeyframe);
+ EXPECT_THAT(timeouts_, testing::IsEmpty());
+}
+
+TEST_F(VideoReceiveStreamTimeoutTrackerTest, TimeoutForDeltaFrame) {
+ timeout_tracker_.Start(true);
+ time_controller_.AdvanceTime(TimeDelta::Millis(5));
+ timeout_tracker_.OnEncodedFrameReleased();
+ time_controller_.AdvanceTime(kMaxWaitForFrame);
+ EXPECT_THAT(timeouts_, testing::ElementsAre(kMaxWaitForFrame));
+ timeout_tracker_.Stop();
+}
+
+TEST_F(VideoReceiveStreamTimeoutTrackerTest, TimeoutForKeyframeWhenForced) {
+ timeout_tracker_.Start(true);
+ time_controller_.AdvanceTime(TimeDelta::Millis(5));
+ timeout_tracker_.OnEncodedFrameReleased();
+ timeout_tracker_.SetWaitingForKeyframe();
+ time_controller_.AdvanceTime(kMaxWaitForKeyframe);
+ EXPECT_THAT(timeouts_, testing::ElementsAre(kMaxWaitForKeyframe));
+ timeout_tracker_.Stop();
+}
+
+TEST_F(VideoReceiveStreamTimeoutTrackerTest, TotalTimeoutUsedInCallback) {
+ timeout_tracker_.Start(true);
+ time_controller_.AdvanceTime(kMaxWaitForKeyframe * 2);
+ timeout_tracker_.OnEncodedFrameReleased();
+ time_controller_.AdvanceTime(kMaxWaitForFrame * 2);
+ EXPECT_THAT(timeouts_,
+ testing::ElementsAre(kMaxWaitForKeyframe, kMaxWaitForKeyframe * 2,
+ kMaxWaitForFrame, kMaxWaitForFrame * 2));
+ timeout_tracker_.Stop();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_send_stream.cc b/third_party/libwebrtc/video/video_send_stream.cc
new file mode 100644
index 0000000000..e5545e761c
--- /dev/null
+++ b/third_party/libwebrtc/video/video_send_stream.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_send_stream.h"
+
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_stream_encoder_settings.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_header_extension_size.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/clock.h"
+#include "video/adaptation/overuse_frame_detector.h"
+#include "video/frame_cadence_adapter.h"
+#include "video/video_stream_encoder.h"
+
+namespace webrtc {
+
+namespace {
+
+size_t CalculateMaxHeaderSize(const RtpConfig& config) {
+ size_t header_size = kRtpHeaderSize;
+ size_t extensions_size = 0;
+ size_t fec_extensions_size = 0;
+ if (!config.extensions.empty()) {
+ RtpHeaderExtensionMap extensions_map(config.extensions);
+ extensions_size = RtpHeaderExtensionSize(RTPSender::VideoExtensionSizes(),
+ extensions_map);
+ fec_extensions_size =
+ RtpHeaderExtensionSize(RTPSender::FecExtensionSizes(), extensions_map);
+ }
+ header_size += extensions_size;
+ if (config.flexfec.payload_type >= 0) {
+ // All FEC extensions again plus maximum FlexFec overhead.
+ header_size += fec_extensions_size + 32;
+ } else {
+ if (config.ulpfec.ulpfec_payload_type >= 0) {
+ // Header with all the FEC extensions will be repeated plus maximum
+ // UlpFec overhead.
+ header_size += fec_extensions_size + 18;
+ }
+ if (config.ulpfec.red_payload_type >= 0) {
+ header_size += 1; // RED header.
+ }
+ }
+ // Additional room for Rtx.
+ if (config.rtx.payload_type >= 0)
+ header_size += kRtxHeaderSize;
+ return header_size;
+}
+
+VideoStreamEncoder::BitrateAllocationCallbackType
+GetBitrateAllocationCallbackType(const VideoSendStream::Config& config,
+ const FieldTrialsView& field_trials) {
+ if (webrtc::RtpExtension::FindHeaderExtensionByUri(
+ config.rtp.extensions,
+ webrtc::RtpExtension::kVideoLayersAllocationUri,
+ config.crypto_options.srtp.enable_encrypted_rtp_header_extensions
+ ? RtpExtension::Filter::kPreferEncryptedExtension
+ : RtpExtension::Filter::kDiscardEncryptedExtension)) {
+ return VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation;
+ }
+ if (field_trials.IsEnabled("WebRTC-Target-Bitrate-Rtcp")) {
+ return VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation;
+ }
+ return VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing;
+}
+
+RtpSenderFrameEncryptionConfig CreateFrameEncryptionConfig(
+ const VideoSendStream::Config* config) {
+ RtpSenderFrameEncryptionConfig frame_encryption_config;
+ frame_encryption_config.frame_encryptor = config->frame_encryptor.get();
+ frame_encryption_config.crypto_options = config->crypto_options;
+ return frame_encryption_config;
+}
+
+RtpSenderObservers CreateObservers(RtcpRttStats* call_stats,
+ EncoderRtcpFeedback* encoder_feedback,
+ SendStatisticsProxy* stats_proxy,
+ SendDelayStats* send_delay_stats) {
+ RtpSenderObservers observers;
+ observers.rtcp_rtt_stats = call_stats;
+ observers.intra_frame_callback = encoder_feedback;
+ observers.rtcp_loss_notification_observer = encoder_feedback;
+ observers.report_block_data_observer = stats_proxy;
+ observers.rtp_stats = stats_proxy;
+ observers.bitrate_observer = stats_proxy;
+ observers.frame_count_observer = stats_proxy;
+ observers.rtcp_type_observer = stats_proxy;
+ observers.send_delay_observer = stats_proxy;
+ observers.send_packet_observer = send_delay_stats;
+ return observers;
+}
+
+std::unique_ptr<VideoStreamEncoder> CreateVideoStreamEncoder(
+ Clock* clock,
+ int num_cpu_cores,
+ TaskQueueFactory* task_queue_factory,
+ SendStatisticsProxy* stats_proxy,
+ const VideoStreamEncoderSettings& encoder_settings,
+ VideoStreamEncoder::BitrateAllocationCallbackType
+ bitrate_allocation_callback_type,
+ const FieldTrialsView& field_trials,
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector) {
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> encoder_queue =
+ task_queue_factory->CreateTaskQueue("EncoderQueue",
+ TaskQueueFactory::Priority::NORMAL);
+ TaskQueueBase* encoder_queue_ptr = encoder_queue.get();
+ return std::make_unique<VideoStreamEncoder>(
+ clock, num_cpu_cores, stats_proxy, encoder_settings,
+ std::make_unique<OveruseFrameDetector>(stats_proxy, field_trials),
+ FrameCadenceAdapterInterface::Create(clock, encoder_queue_ptr,
+ field_trials),
+ std::move(encoder_queue), bitrate_allocation_callback_type, field_trials,
+ encoder_selector);
+}
+
+} // namespace
+
+namespace internal {
+
+VideoSendStream::VideoSendStream(
+ Clock* clock,
+ int num_cpu_cores,
+ TaskQueueFactory* task_queue_factory,
+ TaskQueueBase* network_queue,
+ RtcpRttStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocatorInterface* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ RtcEventLog* event_log,
+ VideoSendStream::Config config,
+ VideoEncoderConfig encoder_config,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
+ std::unique_ptr<FecController> fec_controller,
+ const FieldTrialsView& field_trials)
+ : rtp_transport_queue_(transport->GetWorkerQueue()),
+ transport_(transport),
+ stats_proxy_(clock, config, encoder_config.content_type, field_trials),
+ config_(std::move(config)),
+ content_type_(encoder_config.content_type),
+ video_stream_encoder_(CreateVideoStreamEncoder(
+ clock,
+ num_cpu_cores,
+ task_queue_factory,
+ &stats_proxy_,
+ config_.encoder_settings,
+ GetBitrateAllocationCallbackType(config_, field_trials),
+ field_trials,
+ config_.encoder_selector)),
+ encoder_feedback_(
+ clock,
+ config_.rtp.ssrcs,
+ video_stream_encoder_.get(),
+ [this](uint32_t ssrc, const std::vector<uint16_t>& seq_nums) {
+ return rtp_video_sender_->GetSentRtpPacketInfos(ssrc, seq_nums);
+ }),
+ rtp_video_sender_(
+ transport->CreateRtpVideoSender(suspended_ssrcs,
+ suspended_payload_states,
+ config_.rtp,
+ config_.rtcp_report_interval_ms,
+ config_.send_transport,
+ CreateObservers(call_stats,
+ &encoder_feedback_,
+ &stats_proxy_,
+ send_delay_stats),
+ event_log,
+ std::move(fec_controller),
+ CreateFrameEncryptionConfig(&config_),
+ config_.frame_transformer)),
+ send_stream_(clock,
+ &stats_proxy_,
+ transport,
+ bitrate_allocator,
+ video_stream_encoder_.get(),
+ &config_,
+ encoder_config.max_bitrate_bps,
+ encoder_config.bitrate_priority,
+ encoder_config.content_type,
+ rtp_video_sender_,
+ field_trials) {
+ RTC_DCHECK(config_.encoder_settings.encoder_factory);
+ RTC_DCHECK(config_.encoder_settings.bitrate_allocator_factory);
+
+ video_stream_encoder_->SetFecControllerOverride(rtp_video_sender_);
+
+ ReconfigureVideoEncoder(std::move(encoder_config));
+}
+
+VideoSendStream::~VideoSendStream() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(!running_);
+ transport_->DestroyRtpVideoSender(rtp_video_sender_);
+}
+
+void VideoSendStream::Start() {
+ const std::vector<bool> active_layers(config_.rtp.ssrcs.size(), true);
+ StartPerRtpStream(active_layers);
+}
+
+void VideoSendStream::StartPerRtpStream(const std::vector<bool> active_layers) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  // Keep the expected state of our `running_` flag in sync with the active
+  // layers, since `send_stream_` will be implicitly stopped/started depending
+  // on the state of the layers.
+ bool running = false;
+
+ rtc::StringBuilder active_layers_string;
+ active_layers_string << "{";
+ for (size_t i = 0; i < active_layers.size(); ++i) {
+ if (active_layers[i]) {
+ running = true;
+ active_layers_string << "1";
+ } else {
+ active_layers_string << "0";
+ }
+ if (i < active_layers.size() - 1) {
+ active_layers_string << ", ";
+ }
+ }
+ active_layers_string << "}";
+ RTC_LOG(LS_INFO) << "StartPerRtpStream: " << active_layers_string.str();
+
+ rtp_transport_queue_->RunOrPost(
+ SafeTask(transport_queue_safety_, [this, active_layers] {
+ send_stream_.StartPerRtpStream(active_layers);
+ }));
+
+ running_ = running;
+}
+
+void VideoSendStream::Stop() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!running_)
+ return;
+ RTC_DLOG(LS_INFO) << "VideoSendStream::Stop";
+ running_ = false;
+ rtp_transport_queue_->RunOrPost(SafeTask(transport_queue_safety_, [this] {
+ // As the stream can get re-used and implicitly restarted via changing
+ // the state of the active layers, we do not mark the
+ // `transport_queue_safety_` flag with `SetNotAlive()` here. That's only
+ // done when we stop permanently via `StopPermanentlyAndGetRtpStates()`.
+ send_stream_.Stop();
+ }));
+}
+
+bool VideoSendStream::started() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return running_;
+}
+
+void VideoSendStream::AddAdaptationResource(
+ rtc::scoped_refptr<Resource> resource) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ video_stream_encoder_->AddAdaptationResource(resource);
+}
+
+std::vector<rtc::scoped_refptr<Resource>>
+VideoSendStream::GetAdaptationResources() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return video_stream_encoder_->GetAdaptationResources();
+}
+
+void VideoSendStream::SetSource(
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+ const DegradationPreference& degradation_preference) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ video_stream_encoder_->SetSource(source, degradation_preference);
+}
+
+void VideoSendStream::ReconfigureVideoEncoder(VideoEncoderConfig config) {
+ ReconfigureVideoEncoder(std::move(config), nullptr);
+}
+
+void VideoSendStream::ReconfigureVideoEncoder(VideoEncoderConfig config,
+ SetParametersCallback callback) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK_EQ(content_type_, config.content_type);
+ video_stream_encoder_->ConfigureEncoder(
+ std::move(config),
+ config_.rtp.max_packet_size - CalculateMaxHeaderSize(config_.rtp),
+ std::move(callback));
+}
+
+VideoSendStream::Stats VideoSendStream::GetStats() {
+ // TODO(perkj, solenberg): Some test cases in EndToEndTest call GetStats from
+ // a network thread. See comment in Call::GetStats().
+ // RTC_DCHECK_RUN_ON(&thread_checker_);
+ return stats_proxy_.GetStats();
+}
+
+absl::optional<float> VideoSendStream::GetPacingFactorOverride() const {
+ return send_stream_.configured_pacing_factor();
+}
+
+void VideoSendStream::StopPermanentlyAndGetRtpStates(
+ VideoSendStream::RtpStateMap* rtp_state_map,
+ VideoSendStream::RtpPayloadStateMap* payload_state_map) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ video_stream_encoder_->Stop();
+
+ running_ = false;
+ // Always run these cleanup steps regardless of whether running_ was set
+ // or not. This will unregister callbacks before destruction.
+ // See `VideoSendStreamImpl::StopVideoSendStream` for more.
+ rtp_transport_queue_->RunSynchronous(
+ [this, rtp_state_map, payload_state_map]() {
+ transport_queue_safety_->SetNotAlive();
+ send_stream_.Stop();
+ *rtp_state_map = send_stream_.GetRtpStates();
+ *payload_state_map = send_stream_.GetRtpPayloadStates();
+ });
+}
+
+void VideoSendStream::DeliverRtcp(const uint8_t* packet, size_t length) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ send_stream_.DeliverRtcp(packet, length);
+}
+
+void VideoSendStream::GenerateKeyFrame(const std::vector<std::string>& rids) {
+ // Map rids to layers. If rids is empty, generate a keyframe for all layers.
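+  // E.g. (hypothetical rids) with configured rids {"q", "h", "f"} and
+  // rids = {"h"}, only the second layer is marked kVideoFrameKey; the others
+  // stay kVideoFrameDelta.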
+ std::vector<VideoFrameType> next_frames(config_.rtp.ssrcs.size(),
+ VideoFrameType::kVideoFrameKey);
+ if (!config_.rtp.rids.empty() && !rids.empty()) {
+ std::fill(next_frames.begin(), next_frames.end(),
+ VideoFrameType::kVideoFrameDelta);
+ for (const auto& rid : rids) {
+ for (size_t i = 0; i < config_.rtp.rids.size(); i++) {
+ if (config_.rtp.rids[i] == rid) {
+ next_frames[i] = VideoFrameType::kVideoFrameKey;
+ break;
+ }
+ }
+ }
+ }
+ if (video_stream_encoder_) {
+ video_stream_encoder_->SendKeyFrame(next_frames);
+ }
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_send_stream.h b/third_party/libwebrtc/video/video_send_stream.h
new file mode 100644
index 0000000000..404873fd39
--- /dev/null
+++ b/third_party/libwebrtc/video/video_send_stream.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_SEND_STREAM_H_
+#define VIDEO_VIDEO_SEND_STREAM_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/fec_controller.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "call/bitrate_allocator.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "modules/utility/maybe_worker_thread.h"
+#include "rtc_base/event.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "video/encoder_rtcp_feedback.h"
+#include "video/send_delay_stats.h"
+#include "video/send_statistics_proxy.h"
+#include "video/video_send_stream_impl.h"
+#include "video/video_stream_encoder_interface.h"
+
+namespace webrtc {
+namespace test {
+class VideoSendStreamPeer;
+} // namespace test
+
+class IvfFileWriter;
+class RateLimiter;
+class RtpRtcp;
+class RtpTransportControllerSendInterface;
+class RtcEventLog;
+
+namespace internal {
+
+class VideoSendStreamImpl;
+
+// VideoSendStream implements webrtc::VideoSendStream.
+// Internally, it delegates all public methods to VideoSendStreamImpl and / or
+// VideoStreamEncoder.
+class VideoSendStream : public webrtc::VideoSendStream {
+ public:
+ using RtpStateMap = std::map<uint32_t, RtpState>;
+ using RtpPayloadStateMap = std::map<uint32_t, RtpPayloadState>;
+
+ VideoSendStream(
+ Clock* clock,
+ int num_cpu_cores,
+ TaskQueueFactory* task_queue_factory,
+ TaskQueueBase* network_queue,
+ RtcpRttStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocatorInterface* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ RtcEventLog* event_log,
+ VideoSendStream::Config config,
+ VideoEncoderConfig encoder_config,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
+ std::unique_ptr<FecController> fec_controller,
+ const FieldTrialsView& field_trials);
+
+ ~VideoSendStream() override;
+
+ void DeliverRtcp(const uint8_t* packet, size_t length);
+
+ // webrtc::VideoSendStream implementation.
+ void Start() override;
+ void StartPerRtpStream(std::vector<bool> active_layers) override;
+ void Stop() override;
+ bool started() override;
+
+ void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
+ std::vector<rtc::scoped_refptr<Resource>> GetAdaptationResources() override;
+
+ void SetSource(rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+ const DegradationPreference& degradation_preference) override;
+
+ void ReconfigureVideoEncoder(VideoEncoderConfig config) override;
+ void ReconfigureVideoEncoder(VideoEncoderConfig config,
+ SetParametersCallback callback) override;
+ Stats GetStats() override;
+
+ void StopPermanentlyAndGetRtpStates(RtpStateMap* rtp_state_map,
+ RtpPayloadStateMap* payload_state_map);
+ void GenerateKeyFrame(const std::vector<std::string>& rids) override;
+
+ private:
+ friend class test::VideoSendStreamPeer;
+
+ absl::optional<float> GetPacingFactorOverride() const;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_;
+ MaybeWorkerThread* const rtp_transport_queue_;
+ RtpTransportControllerSendInterface* const transport_;
+ rtc::Event thread_sync_event_;
+ rtc::scoped_refptr<PendingTaskSafetyFlag> transport_queue_safety_ =
+ PendingTaskSafetyFlag::CreateDetached();
+
+ SendStatisticsProxy stats_proxy_;
+ const VideoSendStream::Config config_;
+ const VideoEncoderConfig::ContentType content_type_;
+ std::unique_ptr<VideoStreamEncoderInterface> video_stream_encoder_;
+ EncoderRtcpFeedback encoder_feedback_;
+ RtpVideoSenderInterface* const rtp_video_sender_;
+ VideoSendStreamImpl send_stream_;
+ bool running_ RTC_GUARDED_BY(thread_checker_) = false;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_SEND_STREAM_H_
diff --git a/third_party/libwebrtc/video/video_send_stream_impl.cc b/third_party/libwebrtc/video/video_send_stream_impl.cc
new file mode 100644
index 0000000000..f34388e56a
--- /dev/null
+++ b/third_party/libwebrtc/video/video_send_stream_impl.cc
@@ -0,0 +1,625 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_send_stream_impl.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "api/crypto/crypto_options.h"
+#include "api/rtp_parameters.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video_codecs/video_codec.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "call/video_send_stream.h"
+#include "modules/pacing/pacing_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/alr_experiment.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace internal {
+namespace {
+
+// Max positive size difference to treat allocations as "similar".
+static constexpr int kMaxVbaSizeDifferencePercent = 10;
+// Max time we will throttle similar video bitrate allocations.
+static constexpr int64_t kMaxVbaThrottleTimeMs = 500;
+
+constexpr TimeDelta kEncoderTimeOut = TimeDelta::Seconds(2);
+
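+// Headroom factors used below when computing the padding target: the top
+// active stream's bitrate is scaled by 1.2x for camera video or 1.35x for
+// screenshare to leave some ramp-up margin.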
+constexpr double kVideoHysteresis = 1.2;
+constexpr double kScreenshareHysteresis = 1.35;
+
+// When send-side BWE is used, a stricter 1.1x pacing factor is applied rather
+// than the 2.5x used with receive-side BWE. This provides a more careful
+// bandwidth rampup, with less risk of overshoots causing adverse effects such
+// as packet loss. It is not used with receive-side BWE since, lacking the
+// probing feature there, it could result in too slow an initial rampup.
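+// E.g. with a 1 Mbps target rate the pacer may drain at roughly 1.1 Mbps
+// rather than 2.5 Mbps.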
+static constexpr double kStrictPacingMultiplier = 1.1;
+
+bool TransportSeqNumExtensionConfigured(const VideoSendStream::Config& config) {
+ const std::vector<RtpExtension>& extensions = config.rtp.extensions;
+ return absl::c_any_of(extensions, [](const RtpExtension& ext) {
+ return ext.uri == RtpExtension::kTransportSequenceNumberUri;
+ });
+}
+
+// Calculate max padding bitrate for a multi layer codec.
+int CalculateMaxPadBitrateBps(const std::vector<VideoStream>& streams,
+ bool is_svc,
+ VideoEncoderConfig::ContentType content_type,
+ int min_transmit_bitrate_bps,
+ bool pad_to_min_bitrate,
+ bool alr_probing) {
+ int pad_up_to_bitrate_bps = 0;
+
+ RTC_DCHECK(!is_svc || streams.size() <= 1) << "Only one stream is allowed in "
+ "SVC mode.";
+
+  // Keep only the active streams.
+ std::vector<VideoStream> active_streams;
+ for (const VideoStream& stream : streams) {
+ if (stream.active)
+ active_streams.emplace_back(stream);
+ }
+
+ if (active_streams.size() > 1 || (!active_streams.empty() && is_svc)) {
+    // Simulcast or SVC is used.
+    // If SVC is used, the stream bitrates should already encode the SVC
+    // bitrates:
+    // min_bitrate = min bitrate of the lowest SVC layer.
+    // target_bitrate = sum of target bitrates of the lower layers + min
+    // bitrate of the last one (as used in the calculations below).
+ // max_bitrate = sum of all active layers' max_bitrate.
+ if (alr_probing) {
+ // With alr probing, just pad to the min bitrate of the lowest stream,
+ // probing will handle the rest of the rampup.
+ pad_up_to_bitrate_bps = active_streams[0].min_bitrate_bps;
+ } else {
+ // Without alr probing, pad up to start bitrate of the
+ // highest active stream.
+ const double hysteresis_factor =
+ content_type == VideoEncoderConfig::ContentType::kScreen
+ ? kScreenshareHysteresis
+ : kVideoHysteresis;
+ if (is_svc) {
+ // For SVC, since there is only one "stream", the padding bitrate
+ // needed to enable the top spatial layer is stored in the
+ // `target_bitrate_bps` field.
+ // TODO(sprang): This behavior needs to die.
+ pad_up_to_bitrate_bps = static_cast<int>(
+ hysteresis_factor * active_streams[0].target_bitrate_bps + 0.5);
+ } else {
+ const size_t top_active_stream_idx = active_streams.size() - 1;
+ pad_up_to_bitrate_bps = std::min(
+ static_cast<int>(
+ hysteresis_factor *
+ active_streams[top_active_stream_idx].min_bitrate_bps +
+ 0.5),
+ active_streams[top_active_stream_idx].target_bitrate_bps);
+
+ // Add target_bitrate_bps of the lower active streams.
+ for (size_t i = 0; i < top_active_stream_idx; ++i) {
+ pad_up_to_bitrate_bps += active_streams[i].target_bitrate_bps;
+ }
+ }
+ }
+ } else if (!active_streams.empty() && pad_to_min_bitrate) {
+ pad_up_to_bitrate_bps = active_streams[0].min_bitrate_bps;
+ }
+
+ pad_up_to_bitrate_bps =
+ std::max(pad_up_to_bitrate_bps, min_transmit_bitrate_bps);
+
+ return pad_up_to_bitrate_bps;
+}
+
+absl::optional<AlrExperimentSettings> GetAlrSettings(
+ VideoEncoderConfig::ContentType content_type) {
+ if (content_type == VideoEncoderConfig::ContentType::kScreen) {
+ return AlrExperimentSettings::CreateFromFieldTrial(
+ AlrExperimentSettings::kScreenshareProbingBweExperimentName);
+ }
+ return AlrExperimentSettings::CreateFromFieldTrial(
+ AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
+}
+
+bool SameStreamsEnabled(const VideoBitrateAllocation& lhs,
+ const VideoBitrateAllocation& rhs) {
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (lhs.HasBitrate(si, ti) != rhs.HasBitrate(si, ti)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Returns an optional that has value iff TransportSeqNumExtensionConfigured
+// is `true` for the given video send stream config.
+absl::optional<float> GetConfiguredPacingFactor(
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type,
+ const PacingConfig& default_pacing_config) {
+ if (!TransportSeqNumExtensionConfigured(config))
+ return absl::nullopt;
+
+ absl::optional<AlrExperimentSettings> alr_settings =
+ GetAlrSettings(content_type);
+ if (alr_settings)
+ return alr_settings->pacing_factor;
+
+ RateControlSettings rate_control_settings =
+ RateControlSettings::ParseFromFieldTrials();
+ return rate_control_settings.GetPacingFactor().value_or(
+ default_pacing_config.pacing_factor);
+}
+
+uint32_t GetInitialEncoderMaxBitrate(int initial_encoder_max_bitrate) {
+ if (initial_encoder_max_bitrate > 0)
+ return rtc::dchecked_cast<uint32_t>(initial_encoder_max_bitrate);
+
+  // TODO(srte): Make sure max bitrate is not set to negative values. We don't
+  // have any way to handle unset values in downstream code, such as the
+  // bitrate allocator. Previously -1 was implicitly cast to UINT32_MAX, a
+  // behaviour that is not safe. Converting to 10 Mbps should be safe for
+  // reasonable use cases as it allows adding the max of multiple streams
+  // without wrapping around.
+ const int kFallbackMaxBitrateBps = 10000000;
+ RTC_DLOG(LS_ERROR) << "ERROR: Initial encoder max bitrate = "
+ << initial_encoder_max_bitrate << " which is <= 0!";
+ RTC_DLOG(LS_INFO) << "Using default encoder max bitrate = 10 Mbps";
+ return kFallbackMaxBitrateBps;
+}
+
+} // namespace
+
+PacingConfig::PacingConfig(const FieldTrialsView& field_trials)
+ : pacing_factor("factor", kStrictPacingMultiplier),
+ max_pacing_delay("max_delay", PacingController::kMaxExpectedQueueLength) {
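+  // The defaults above may be overridden via the "WebRTC-Video-Pacing" field
+  // trial, e.g. (assumed syntax and hypothetical values)
+  // "WebRTC-Video-Pacing/factor:1.2,max_delay:150ms/".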
+ ParseFieldTrial({&pacing_factor, &max_pacing_delay},
+ field_trials.Lookup("WebRTC-Video-Pacing"));
+}
+PacingConfig::PacingConfig(const PacingConfig&) = default;
+PacingConfig::~PacingConfig() = default;
+
+VideoSendStreamImpl::VideoSendStreamImpl(
+ Clock* clock,
+ SendStatisticsProxy* stats_proxy,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocatorInterface* bitrate_allocator,
+ VideoStreamEncoderInterface* video_stream_encoder,
+ const VideoSendStream::Config* config,
+ int initial_encoder_max_bitrate,
+ double initial_encoder_bitrate_priority,
+ VideoEncoderConfig::ContentType content_type,
+ RtpVideoSenderInterface* rtp_video_sender,
+ const FieldTrialsView& field_trials)
+ : clock_(clock),
+ has_alr_probing_(config->periodic_alr_bandwidth_probing ||
+ GetAlrSettings(content_type)),
+ pacing_config_(PacingConfig(field_trials)),
+ stats_proxy_(stats_proxy),
+ config_(config),
+ rtp_transport_queue_(transport->GetWorkerQueue()),
+ timed_out_(false),
+ transport_(transport),
+ bitrate_allocator_(bitrate_allocator),
+ disable_padding_(true),
+ max_padding_bitrate_(0),
+ encoder_min_bitrate_bps_(0),
+ encoder_max_bitrate_bps_(
+ GetInitialEncoderMaxBitrate(initial_encoder_max_bitrate)),
+ encoder_target_rate_bps_(0),
+ encoder_bitrate_priority_(initial_encoder_bitrate_priority),
+ video_stream_encoder_(video_stream_encoder),
+ bandwidth_observer_(transport->GetBandwidthObserver()),
+ rtp_video_sender_(rtp_video_sender),
+ configured_pacing_factor_(
+ GetConfiguredPacingFactor(*config_, content_type, pacing_config_)) {
+ RTC_DCHECK_GE(config_->rtp.payload_type, 0);
+ RTC_DCHECK_LE(config_->rtp.payload_type, 127);
+ RTC_DCHECK(!config_->rtp.ssrcs.empty());
+ RTC_DCHECK(transport_);
+ RTC_DCHECK_NE(initial_encoder_max_bitrate, 0);
+ RTC_LOG(LS_INFO) << "VideoSendStreamImpl: " << config_->ToString();
+
+ RTC_CHECK(AlrExperimentSettings::MaxOneFieldTrialEnabled());
+
+ // Only request rotation at the source when we positively know that the remote
+ // side doesn't support the rotation extension. This allows us to prepare the
+  // encoder in the expectation that rotation is supported, which is the
+  // common case.
+ bool rotation_applied = absl::c_none_of(
+ config_->rtp.extensions, [](const RtpExtension& extension) {
+ return extension.uri == RtpExtension::kVideoRotationUri;
+ });
+
+ video_stream_encoder_->SetSink(this, rotation_applied);
+
+ absl::optional<bool> enable_alr_bw_probing;
+
+ // If send-side BWE is enabled, check if we should apply updated probing and
+ // pacing settings.
+ if (configured_pacing_factor_) {
+ absl::optional<AlrExperimentSettings> alr_settings =
+ GetAlrSettings(content_type);
+ int queue_time_limit_ms;
+ if (alr_settings) {
+ enable_alr_bw_probing = true;
+ queue_time_limit_ms = alr_settings->max_paced_queue_time;
+ } else {
+ RateControlSettings rate_control_settings =
+ RateControlSettings::ParseFromFieldTrials();
+ enable_alr_bw_probing = rate_control_settings.UseAlrProbing();
+ queue_time_limit_ms = pacing_config_.max_pacing_delay.Get().ms();
+ }
+
+ transport->SetQueueTimeLimit(queue_time_limit_ms);
+ }
+
+ if (config_->periodic_alr_bandwidth_probing) {
+ enable_alr_bw_probing = config_->periodic_alr_bandwidth_probing;
+ }
+
+ if (enable_alr_bw_probing) {
+ transport->EnablePeriodicAlrProbing(*enable_alr_bw_probing);
+ }
+
+ rtp_transport_queue_->RunOrPost(SafeTask(transport_queue_safety_, [this] {
+ if (configured_pacing_factor_)
+ transport_->SetPacingFactor(*configured_pacing_factor_);
+
+ video_stream_encoder_->SetStartBitrate(
+ bitrate_allocator_->GetStartBitrate(this));
+ }));
+}
+
+VideoSendStreamImpl::~VideoSendStreamImpl() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_LOG(LS_INFO) << "~VideoSendStreamImpl: " << config_->ToString();
+ // TODO(webrtc:14502): Change `transport_queue_safety_` to be of type
+  // ScopedTaskSafety if experiment WebRTC-SendPacketsOnWorkerThread succeeds.
+ if (rtp_transport_queue_->IsCurrent()) {
+ transport_queue_safety_->SetNotAlive();
+ }
+}
+
+void VideoSendStreamImpl::DeliverRtcp(const uint8_t* packet, size_t length) {
+ // Runs on a worker thread.
+ rtp_video_sender_->DeliverRtcp(packet, length);
+}
+
+void VideoSendStreamImpl::StartPerRtpStream(
+ const std::vector<bool> active_layers) {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ bool previously_active = rtp_video_sender_->IsActive();
+ rtp_video_sender_->SetActiveModules(active_layers);
+ if (!rtp_video_sender_->IsActive() && previously_active) {
+ StopVideoSendStream();
+ } else if (rtp_video_sender_->IsActive() && !previously_active) {
+ StartupVideoSendStream();
+ }
+}
+
+void VideoSendStreamImpl::StartupVideoSendStream() {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ transport_queue_safety_->SetAlive();
+
+ bitrate_allocator_->AddObserver(this, GetAllocationConfig());
+ // Start monitoring encoder activity.
+ {
+ RTC_DCHECK(!check_encoder_activity_task_.Running());
+
+ activity_ = false;
+ timed_out_ = false;
+ check_encoder_activity_task_ = RepeatingTaskHandle::DelayedStart(
+ rtp_transport_queue_->TaskQueueForDelayedTasks(), kEncoderTimeOut,
+ [this] {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ if (!activity_) {
+ if (!timed_out_) {
+ SignalEncoderTimedOut();
+ }
+ timed_out_ = true;
+ disable_padding_ = true;
+ } else if (timed_out_) {
+ SignalEncoderActive();
+ timed_out_ = false;
+ }
+ activity_ = false;
+ return kEncoderTimeOut;
+ });
+ }
+
+ video_stream_encoder_->SendKeyFrame();
+}
+
+void VideoSendStreamImpl::Stop() {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ RTC_LOG(LS_INFO) << "VideoSendStreamImpl::Stop";
+ if (!rtp_video_sender_->IsActive())
+ return;
+
+ RTC_DCHECK(transport_queue_safety_->alive());
+ TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Stop");
+ rtp_video_sender_->Stop();
+ StopVideoSendStream();
+}
+
+void VideoSendStreamImpl::StopVideoSendStream() {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ bitrate_allocator_->RemoveObserver(this);
+ check_encoder_activity_task_.Stop();
+ video_stream_encoder_->OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(),
+ DataRate::Zero(), 0, 0, 0);
+ stats_proxy_->OnSetEncoderTargetRate(0);
+ transport_queue_safety_->SetNotAlive();
+}
+
+void VideoSendStreamImpl::SignalEncoderTimedOut() {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+  // If the encoder has not produced anything in the last kEncoderTimeOut
+  // period even though it is supposed to, deregister as
+  // BitrateAllocatorObserver. This can happen if a camera stops producing
+  // frames.
+ if (encoder_target_rate_bps_ > 0) {
+ RTC_LOG(LS_INFO) << "SignalEncoderTimedOut, Encoder timed out.";
+ bitrate_allocator_->RemoveObserver(this);
+ }
+}
+
+void VideoSendStreamImpl::OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& allocation) {
+ // OnBitrateAllocationUpdated is invoked from the encoder task queue or
+ // the rtp_transport_queue_.
+ auto task = [=] {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ if (encoder_target_rate_bps_ == 0) {
+ return;
+ }
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (video_bitrate_allocation_context_) {
+      // If the new allocation is within kMaxVbaSizeDifferencePercent larger
+      // than the previously sent allocation and the same streams are still
+      // enabled, it is considered "similar". We do not want to send similar
+      // allocations more than once per kMaxVbaThrottleTimeMs.
+ const VideoBitrateAllocation& last =
+ video_bitrate_allocation_context_->last_sent_allocation;
+ const bool is_similar =
+ allocation.get_sum_bps() >= last.get_sum_bps() &&
+ allocation.get_sum_bps() <
+ (last.get_sum_bps() * (100 + kMaxVbaSizeDifferencePercent)) /
+ 100 &&
+ SameStreamsEnabled(allocation, last);
+ if (is_similar &&
+ (now_ms - video_bitrate_allocation_context_->last_send_time_ms) <
+ kMaxVbaThrottleTimeMs) {
+ // This allocation is too similar, cache it and return.
+ video_bitrate_allocation_context_->throttled_allocation = allocation;
+ return;
+ }
+ } else {
+ video_bitrate_allocation_context_.emplace();
+ }
+
+ video_bitrate_allocation_context_->last_sent_allocation = allocation;
+ video_bitrate_allocation_context_->throttled_allocation.reset();
+ video_bitrate_allocation_context_->last_send_time_ms = now_ms;
+
+ // Send bitrate allocation metadata only if encoder is not paused.
+ rtp_video_sender_->OnBitrateAllocationUpdated(allocation);
+ };
+ if (!rtp_transport_queue_->IsCurrent()) {
+ rtp_transport_queue_->TaskQueueForPost()->PostTask(
+ SafeTask(transport_queue_safety_, std::move(task)));
+ } else {
+ task();
+ }
+}
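
The throttle above reduces to a small predicate. A sketch with the constants the unit tests below exercise (a 10% window and 500 ms are assumptions here; the real definitions live outside this hunk):

    #include <cstdint>

    constexpr int kMaxVbaSizeDifferencePercent = 10;  // Assumed, see tests.
    constexpr int64_t kMaxVbaThrottleTimeMs = 500;    // Assumed, see tests.

    // Returns true if the new allocation should be cached rather than
    // forwarded, matching the "similar" check in the task above.
    bool ShouldThrottle(int64_t last_sum_bps,
                        int64_t next_sum_bps,
                        bool same_streams_enabled,
                        int64_t ms_since_last_send) {
      const bool is_similar =
          next_sum_bps >= last_sum_bps &&
          next_sum_bps <
              (last_sum_bps * (100 + kMaxVbaSizeDifferencePercent)) / 100 &&
          same_streams_enabled;
      return is_similar && ms_since_last_send < kMaxVbaThrottleTimeMs;
    }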
+
+void VideoSendStreamImpl::OnVideoLayersAllocationUpdated(
+ VideoLayersAllocation allocation) {
+ // OnVideoLayersAllocationUpdated is handled on the encoder task queue in
+ // order to not race with OnEncodedImage callbacks.
+ rtp_video_sender_->OnVideoLayersAllocationUpdated(allocation);
+}
+
+void VideoSendStreamImpl::SignalEncoderActive() {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ if (rtp_video_sender_->IsActive()) {
+ RTC_LOG(LS_INFO) << "SignalEncoderActive, Encoder is active.";
+ bitrate_allocator_->AddObserver(this, GetAllocationConfig());
+ }
+}
+
+MediaStreamAllocationConfig VideoSendStreamImpl::GetAllocationConfig() const {
+ return MediaStreamAllocationConfig{
+ static_cast<uint32_t>(encoder_min_bitrate_bps_),
+ encoder_max_bitrate_bps_,
+ static_cast<uint32_t>(disable_padding_ ? 0 : max_padding_bitrate_),
+ /* priority_bitrate */ 0,
+ !config_->suspend_below_min_bitrate,
+ encoder_bitrate_priority_};
+}
+
+void VideoSendStreamImpl::OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ bool is_svc,
+ VideoEncoderConfig::ContentType content_type,
+ int min_transmit_bitrate_bps) {
+  // Currently called on the encoder task queue.
+ RTC_DCHECK(!rtp_transport_queue_->IsCurrent());
+ auto closure = [this, streams = std::move(streams), is_svc, content_type,
+ min_transmit_bitrate_bps]() mutable {
+ RTC_DCHECK_GE(config_->rtp.ssrcs.size(), streams.size());
+ TRACE_EVENT0("webrtc", "VideoSendStream::OnEncoderConfigurationChanged");
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+
+ const VideoCodecType codec_type =
+ PayloadStringToCodecType(config_->rtp.payload_name);
+
+ const absl::optional<DataRate> experimental_min_bitrate =
+ GetExperimentalMinVideoBitrate(codec_type);
+ encoder_min_bitrate_bps_ =
+ experimental_min_bitrate
+ ? experimental_min_bitrate->bps()
+ : std::max(streams[0].min_bitrate_bps, kDefaultMinVideoBitrateBps);
+
+ encoder_max_bitrate_bps_ = 0;
+ double stream_bitrate_priority_sum = 0;
+ for (const auto& stream : streams) {
+      // Inactive streams should not contribute to the allocated bitrate.
+ encoder_max_bitrate_bps_ += stream.active ? stream.max_bitrate_bps : 0;
+ if (stream.bitrate_priority) {
+ RTC_DCHECK_GT(*stream.bitrate_priority, 0);
+ stream_bitrate_priority_sum += *stream.bitrate_priority;
+ }
+ }
+ RTC_DCHECK_GT(stream_bitrate_priority_sum, 0);
+ encoder_bitrate_priority_ = stream_bitrate_priority_sum;
+ encoder_max_bitrate_bps_ =
+ std::max(static_cast<uint32_t>(encoder_min_bitrate_bps_),
+ encoder_max_bitrate_bps_);
+
+ // TODO(bugs.webrtc.org/10266): Query the VideoBitrateAllocator instead.
+ max_padding_bitrate_ = CalculateMaxPadBitrateBps(
+ streams, is_svc, content_type, min_transmit_bitrate_bps,
+ config_->suspend_below_min_bitrate, has_alr_probing_);
+
+ // Clear stats for disabled layers.
+ for (size_t i = streams.size(); i < config_->rtp.ssrcs.size(); ++i) {
+ stats_proxy_->OnInactiveSsrc(config_->rtp.ssrcs[i]);
+ }
+
+ const size_t num_temporal_layers =
+ streams.back().num_temporal_layers.value_or(1);
+
+ rtp_video_sender_->SetEncodingData(streams[0].width, streams[0].height,
+ num_temporal_layers);
+
+ if (rtp_video_sender_->IsActive()) {
+ // The send stream is started already. Update the allocator with new
+ // bitrate limits.
+ bitrate_allocator_->AddObserver(this, GetAllocationConfig());
+ }
+ };
+
+ rtp_transport_queue_->TaskQueueForPost()->PostTask(
+ SafeTask(transport_queue_safety_, std::move(closure)));
+}
+
+EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+  // OnEncodedImage is called on whatever thread the real encoder
+  // implementation runs on. In the case of hardware encoders, there might be
+  // several encoders running in parallel on different threads.
+
+ // Indicate that there still is activity going on.
+ activity_ = true;
+ RTC_DCHECK(!rtp_transport_queue_->IsCurrent());
+
+ auto task_to_run_on_worker = [this]() {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ if (disable_padding_) {
+ disable_padding_ = false;
+      // Ensure the padding bitrate is propagated to the bitrate allocator.
+ SignalEncoderActive();
+ }
+ // Check if there's a throttled VideoBitrateAllocation that we should try
+ // sending.
+ auto& context = video_bitrate_allocation_context_;
+ if (context && context->throttled_allocation) {
+ OnBitrateAllocationUpdated(*context->throttled_allocation);
+ }
+ };
+ rtp_transport_queue_->TaskQueueForPost()->PostTask(
+ SafeTask(transport_queue_safety_, std::move(task_to_run_on_worker)));
+
+ return rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info);
+}
+
+void VideoSendStreamImpl::OnDroppedFrame(
+ EncodedImageCallback::DropReason reason) {
+ activity_ = true;
+}
+
+std::map<uint32_t, RtpState> VideoSendStreamImpl::GetRtpStates() const {
+ return rtp_video_sender_->GetRtpStates();
+}
+
+std::map<uint32_t, RtpPayloadState> VideoSendStreamImpl::GetRtpPayloadStates()
+ const {
+ return rtp_video_sender_->GetRtpPayloadStates();
+}
+
+uint32_t VideoSendStreamImpl::OnBitrateUpdated(BitrateAllocationUpdate update) {
+ RTC_DCHECK_RUN_ON(rtp_transport_queue_);
+ RTC_DCHECK(rtp_video_sender_->IsActive())
+ << "VideoSendStream::Start has not been called.";
+
+ // When the BWE algorithm doesn't pass a stable estimate, we'll use the
+ // unstable one instead.
+ if (update.stable_target_bitrate.IsZero()) {
+ update.stable_target_bitrate = update.target_bitrate;
+ }
+
+ rtp_video_sender_->OnBitrateUpdated(update, stats_proxy_->GetSendFrameRate());
+ encoder_target_rate_bps_ = rtp_video_sender_->GetPayloadBitrateBps();
+ const uint32_t protection_bitrate_bps =
+ rtp_video_sender_->GetProtectionBitrateBps();
+ DataRate link_allocation = DataRate::Zero();
+ if (encoder_target_rate_bps_ > protection_bitrate_bps) {
+ link_allocation =
+ DataRate::BitsPerSec(encoder_target_rate_bps_ - protection_bitrate_bps);
+ }
+ DataRate overhead =
+ update.target_bitrate - DataRate::BitsPerSec(encoder_target_rate_bps_);
+ DataRate encoder_stable_target_rate = update.stable_target_bitrate;
+ if (encoder_stable_target_rate > overhead) {
+ encoder_stable_target_rate = encoder_stable_target_rate - overhead;
+ } else {
+ encoder_stable_target_rate = DataRate::BitsPerSec(encoder_target_rate_bps_);
+ }
+
+ encoder_target_rate_bps_ =
+ std::min(encoder_max_bitrate_bps_, encoder_target_rate_bps_);
+
+ encoder_stable_target_rate =
+ std::min(DataRate::BitsPerSec(encoder_max_bitrate_bps_),
+ encoder_stable_target_rate);
+
+ DataRate encoder_target_rate = DataRate::BitsPerSec(encoder_target_rate_bps_);
+ link_allocation = std::max(encoder_target_rate, link_allocation);
+ video_stream_encoder_->OnBitrateUpdated(
+ encoder_target_rate, encoder_stable_target_rate, link_allocation,
+ rtc::dchecked_cast<uint8_t>(update.packet_loss_ratio * 256),
+ update.round_trip_time.ms(), update.cwnd_reduce_ratio);
+ stats_proxy_->OnSetEncoderTargetRate(encoder_target_rate_bps_);
+ return protection_bitrate_bps;
+}
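
A worked pass through the arithmetic above, using the numbers from the CallsVideoStreamEncoderOnBitrateUpdate test further down (encoder max 200 kbps, BWE target 250 kbps, protection 10 kbps):

    // target_bitrate          = 250 kbps (200 kbps stream max + 50 headroom)
    // payload (encoder) rate  = 250 kbps, protection = 10 kbps
    // link_allocation         = 250 - 10  = 240 kbps
    // overhead                = 250 - 250 = 0 kbps
    // stable target           = 250 - 0, capped to encoder max -> 200 kbps
    // encoder target          = min(250, 200) = 200 kbps
    // final link_allocation   = max(200, 240) = 240 kbps
    // => video_stream_encoder_->OnBitrateUpdated(200, 200, 240 kbps, ...)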
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_send_stream_impl.h b/third_party/libwebrtc/video/video_send_stream_impl.h
new file mode 100644
index 0000000000..f145450655
--- /dev/null
+++ b/third_party/libwebrtc/video/video_send_stream_impl.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_VIDEO_SEND_STREAM_IMPL_H_
+#define VIDEO_VIDEO_SEND_STREAM_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/bitrate_allocator.h"
+#include "call/rtp_config.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "call/rtp_video_sender_interface.h"
+#include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/utility/maybe_worker_thread.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/config/video_encoder_config.h"
+#include "video/send_statistics_proxy.h"
+#include "video/video_stream_encoder_interface.h"
+
+namespace webrtc {
+namespace internal {
+
+// Pacing buffer config; overridden by ALR config if provided.
+struct PacingConfig {
+ explicit PacingConfig(const FieldTrialsView& field_trials);
+ PacingConfig(const PacingConfig&);
+ PacingConfig& operator=(const PacingConfig&) = default;
+ ~PacingConfig();
+ FieldTrialParameter<double> pacing_factor;
+ FieldTrialParameter<TimeDelta> max_pacing_delay;
+};
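
A FieldTrialParameter-backed struct like this one is populated from a field-trial string. The trial key and parameter names below ("WebRTC-Video-Pacing", "factor", "max_delay") follow upstream WebRTC convention and are assumptions here, since the constructor body is outside this hunk:

    // Hypothetical usage sketch; assumes the constructor parses the
    // "WebRTC-Video-Pacing" trial with keys "factor" and "max_delay".
    webrtc::test::ScopedKeyValueConfig trials(
        "WebRTC-Video-Pacing/factor:1.1,max_delay:50ms/");
    PacingConfig config(trials);
    double factor = config.pacing_factor.Get();            // 1.1
    TimeDelta max_delay = config.max_pacing_delay.Get();   // 50 ms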
+
+// VideoSendStreamImpl implements internal::VideoSendStream.
+// It is created and destroyed on `rtp_transport_queue`. The intent is to
+// decrease the need for locking and to ensure methods are called in sequence.
+// Public methods except `DeliverRtcp` must be called on `rtp_transport_queue`.
+// DeliverRtcp is called on the libjingle worker thread or a network thread.
+// An encoder may deliver frames through the EncodedImageCallback on an
+// arbitrary thread.
+class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver,
+ public VideoStreamEncoderInterface::EncoderSink {
+ public:
+ VideoSendStreamImpl(Clock* clock,
+ SendStatisticsProxy* stats_proxy,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocatorInterface* bitrate_allocator,
+ VideoStreamEncoderInterface* video_stream_encoder,
+ const VideoSendStream::Config* config,
+ int initial_encoder_max_bitrate,
+ double initial_encoder_bitrate_priority,
+ VideoEncoderConfig::ContentType content_type,
+ RtpVideoSenderInterface* rtp_video_sender,
+ const FieldTrialsView& field_trials);
+ ~VideoSendStreamImpl() override;
+
+ void DeliverRtcp(const uint8_t* packet, size_t length);
+ void StartPerRtpStream(std::vector<bool> active_layers);
+ void Stop();
+
+ // TODO(holmer): Move these to RtpTransportControllerSend.
+ std::map<uint32_t, RtpState> GetRtpStates() const;
+
+ std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const;
+
+ const absl::optional<float>& configured_pacing_factor() const {
+ return configured_pacing_factor_;
+ }
+
+ private:
+ // Implements BitrateAllocatorObserver.
+ uint32_t OnBitrateUpdated(BitrateAllocationUpdate update) override;
+
+ // Implements VideoStreamEncoderInterface::EncoderSink
+ void OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ bool is_svc,
+ VideoEncoderConfig::ContentType content_type,
+ int min_transmit_bitrate_bps) override;
+
+ void OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& allocation) override;
+ void OnVideoLayersAllocationUpdated(
+ VideoLayersAllocation allocation) override;
+
+  // Implements EncodedImageCallback. The implementation routes encoded frames
+  // to `rtp_video_sender_`. Called on an arbitrary encoder callback thread.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override;
+
+ // Implements EncodedImageCallback.
+ void OnDroppedFrame(EncodedImageCallback::DropReason reason) override;
+
+ // Starts monitoring and sends a keyframe.
+ void StartupVideoSendStream();
+  // Removes the bitrate observer, stops monitoring and notifies the video
+  // encoder that the target rate is zero.
+ void StopVideoSendStream() RTC_RUN_ON(rtp_transport_queue_);
+
+ void ConfigureProtection();
+ void ConfigureSsrcs();
+ void SignalEncoderTimedOut();
+ void SignalEncoderActive();
+ MediaStreamAllocationConfig GetAllocationConfig() const
+ RTC_RUN_ON(rtp_transport_queue_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_;
+ Clock* const clock_;
+ const bool has_alr_probing_;
+ const PacingConfig pacing_config_;
+
+ SendStatisticsProxy* const stats_proxy_;
+ const VideoSendStream::Config* const config_;
+
+ MaybeWorkerThread* const rtp_transport_queue_;
+
+ RepeatingTaskHandle check_encoder_activity_task_
+ RTC_GUARDED_BY(rtp_transport_queue_);
+
+ std::atomic_bool activity_;
+ bool timed_out_ RTC_GUARDED_BY(rtp_transport_queue_);
+
+ RtpTransportControllerSendInterface* const transport_;
+ BitrateAllocatorInterface* const bitrate_allocator_;
+
+ bool disable_padding_;
+ int max_padding_bitrate_;
+ int encoder_min_bitrate_bps_;
+ uint32_t encoder_max_bitrate_bps_;
+ uint32_t encoder_target_rate_bps_;
+ double encoder_bitrate_priority_;
+
+ VideoStreamEncoderInterface* const video_stream_encoder_;
+
+ RtcpBandwidthObserver* const bandwidth_observer_;
+ RtpVideoSenderInterface* const rtp_video_sender_;
+
+ rtc::scoped_refptr<PendingTaskSafetyFlag> transport_queue_safety_ =
+ PendingTaskSafetyFlag::CreateDetached();
+
+  // Context for the most recent (possibly throttled) and the last sent video
+  // bitrate allocation. Used to throttle sending of similar allocations.
+ struct VbaSendContext {
+ VideoBitrateAllocation last_sent_allocation;
+ absl::optional<VideoBitrateAllocation> throttled_allocation;
+ int64_t last_send_time_ms;
+ };
+ absl::optional<VbaSendContext> video_bitrate_allocation_context_
+ RTC_GUARDED_BY(rtp_transport_queue_);
+ const absl::optional<float> configured_pacing_factor_;
+};
+} // namespace internal
+} // namespace webrtc
+#endif // VIDEO_VIDEO_SEND_STREAM_IMPL_H_
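
The `transport_queue_safety_` member follows WebRTC's standard task-cancellation pattern, used throughout the implementation file above. Condensed sketch (`task_queue` stands in for any TaskQueueBase):

    // Tasks wrapped in SafeTask() become no-ops once the flag is not alive,
    // which is how StopVideoSendStream() and the destructor cancel work that
    // is still queued on the transport queue.
    rtc::scoped_refptr<PendingTaskSafetyFlag> safety =
        PendingTaskSafetyFlag::CreateDetached();

    task_queue->PostTask(SafeTask(safety, [] {
      // Runs only if `safety` is still alive when the task is dequeued.
    }));

    safety->SetNotAlive();  // Any still-pending SafeTask now does nothing.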
diff --git a/third_party/libwebrtc/video/video_send_stream_impl_unittest.cc b/third_party/libwebrtc/video/video_send_stream_impl_unittest.cc
new file mode 100644
index 0000000000..c38dcd0e1e
--- /dev/null
+++ b/third_party/libwebrtc/video/video_send_stream_impl_unittest.cc
@@ -0,0 +1,1036 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_send_stream_impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "call/rtp_video_sender.h"
+#include "call/test/mock_bitrate_allocator.h"
+#include "call/test/mock_rtp_transport_controller_send.h"
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+#include "modules/utility/maybe_worker_thread.h"
+#include "modules/video_coding/fec_controller_default.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/alr_experiment.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "video/test/mock_video_stream_encoder.h"
+#include "video/video_send_stream.h"
+
+namespace webrtc {
+
+bool operator==(const BitrateAllocationUpdate& a,
+ const BitrateAllocationUpdate& b) {
+ return a.target_bitrate == b.target_bitrate &&
+ a.round_trip_time == b.round_trip_time &&
+ a.packet_loss_ratio == b.packet_loss_ratio;
+}
+
+namespace internal {
+namespace {
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+constexpr int64_t kDefaultInitialBitrateBps = 333000;
+const double kDefaultBitratePriority = 0.5;
+
+const float kAlrProbingExperimentPaceMultiplier = 1.0f;
+std::string GetAlrProbingExperimentString() {
+ return std::string(
+ AlrExperimentSettings::kScreenshareProbingBweExperimentName) +
+ "/1.0,2875,80,40,-60,3/";
+}
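
The string built above encodes an AlrExperimentSettings value. In upstream WebRTC the comma-separated fields are read in the order shown below; that ordering is assumed here, since the parser is outside this diff:

    // "1.0,2875,80,40,-60,3"
    //   1.0   pacing factor (matches kAlrProbingExperimentPaceMultiplier)
    //   2875  max paced queue time (ms)
    //   80    ALR bandwidth usage percent
    //   40    ALR start budget level percent
    //   -60   ALR stop budget level percent
    //   3     group id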
+class MockRtpVideoSender : public RtpVideoSenderInterface {
+ public:
+ MOCK_METHOD(void, SetActiveModules, (const std::vector<bool>&), (override));
+ MOCK_METHOD(void, Stop, (), (override));
+ MOCK_METHOD(bool, IsActive, (), (override));
+ MOCK_METHOD(void, OnNetworkAvailability, (bool), (override));
+ MOCK_METHOD((std::map<uint32_t, RtpState>),
+ GetRtpStates,
+ (),
+ (const, override));
+ MOCK_METHOD((std::map<uint32_t, RtpPayloadState>),
+ GetRtpPayloadStates,
+ (),
+ (const, override));
+ MOCK_METHOD(void, DeliverRtcp, (const uint8_t*, size_t), (override));
+ MOCK_METHOD(void,
+ OnBitrateAllocationUpdated,
+ (const VideoBitrateAllocation&),
+ (override));
+ MOCK_METHOD(void,
+ OnVideoLayersAllocationUpdated,
+ (const VideoLayersAllocation&),
+ (override));
+ MOCK_METHOD(EncodedImageCallback::Result,
+ OnEncodedImage,
+ (const EncodedImage&, const CodecSpecificInfo*),
+ (override));
+ MOCK_METHOD(void, OnTransportOverheadChanged, (size_t), (override));
+ MOCK_METHOD(void,
+ OnBitrateUpdated,
+ (BitrateAllocationUpdate, int),
+ (override));
+ MOCK_METHOD(uint32_t, GetPayloadBitrateBps, (), (const, override));
+ MOCK_METHOD(uint32_t, GetProtectionBitrateBps, (), (const, override));
+ MOCK_METHOD(void, SetEncodingData, (size_t, size_t, size_t), (override));
+ MOCK_METHOD(std::vector<RtpSequenceNumberMap::Info>,
+ GetSentRtpPacketInfos,
+ (uint32_t ssrc, rtc::ArrayView<const uint16_t> sequence_numbers),
+ (const, override));
+
+ MOCK_METHOD(void, SetFecAllowed, (bool fec_allowed), (override));
+};
+
+BitrateAllocationUpdate CreateAllocation(int bitrate_bps) {
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::BitsPerSec(bitrate_bps);
+ update.packet_loss_ratio = 0;
+ update.round_trip_time = TimeDelta::Zero();
+ return update;
+}
+} // namespace
+
+class VideoSendStreamImplTest : public ::testing::Test {
+ protected:
+ VideoSendStreamImplTest()
+ : time_controller_(Timestamp::Seconds(1000)),
+ config_(&transport_),
+ send_delay_stats_(time_controller_.GetClock()),
+ worker_queue_(field_trials_,
+ "worker_queue",
+ time_controller_.GetTaskQueueFactory()),
+ encoder_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "encoder_queue",
+ TaskQueueFactory::Priority::NORMAL)),
+ stats_proxy_(time_controller_.GetClock(),
+ config_,
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials_) {
+ config_.rtp.ssrcs.push_back(8080);
+ config_.rtp.payload_type = 1;
+
+ EXPECT_CALL(transport_controller_, packet_router())
+ .WillRepeatedly(Return(&packet_router_));
+ EXPECT_CALL(transport_controller_, CreateRtpVideoSender)
+ .WillRepeatedly(Return(&rtp_video_sender_));
+ ON_CALL(rtp_video_sender_, Stop()).WillByDefault(::testing::Invoke([&] {
+ active_modules_.clear();
+ }));
+ ON_CALL(rtp_video_sender_, IsActive())
+ .WillByDefault(::testing::Invoke([&]() {
+ for (bool enabled : active_modules_) {
+ if (enabled)
+ return true;
+ }
+ return false;
+ }));
+ ON_CALL(rtp_video_sender_, SetActiveModules)
+ .WillByDefault(::testing::SaveArg<0>(&active_modules_));
+ ON_CALL(transport_controller_, GetWorkerQueue())
+ .WillByDefault(Return(&worker_queue_));
+ }
+ ~VideoSendStreamImplTest() {}
+
+ std::unique_ptr<VideoSendStreamImpl> CreateVideoSendStreamImpl(
+ int initial_encoder_max_bitrate,
+ double initial_encoder_bitrate_priority,
+ VideoEncoderConfig::ContentType content_type) {
+ RTC_DCHECK(!worker_queue_.IsCurrent());
+
+ EXPECT_CALL(bitrate_allocator_, GetStartBitrate(_))
+ .WillOnce(Return(123000));
+
+ std::map<uint32_t, RtpState> suspended_ssrcs;
+ std::map<uint32_t, RtpPayloadState> suspended_payload_states;
+ auto ret = std::make_unique<VideoSendStreamImpl>(
+ time_controller_.GetClock(), &stats_proxy_, &transport_controller_,
+ &bitrate_allocator_, &video_stream_encoder_, &config_,
+ initial_encoder_max_bitrate, initial_encoder_bitrate_priority,
+ content_type, &rtp_video_sender_, field_trials_);
+
+    // The call to GetStartBitrate() executes asynchronously on the task
+    // queue. Ensure all tasks get to run.
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+
+ return ret;
+ }
+
+ protected:
+ GlobalSimulatedTimeController time_controller_;
+ webrtc::test::ScopedKeyValueConfig field_trials_;
+ NiceMock<MockTransport> transport_;
+ NiceMock<MockRtpTransportControllerSend> transport_controller_;
+ NiceMock<MockBitrateAllocator> bitrate_allocator_;
+ NiceMock<MockVideoStreamEncoder> video_stream_encoder_;
+ NiceMock<MockRtpVideoSender> rtp_video_sender_;
+ std::vector<bool> active_modules_;
+
+ RtcEventLogNull event_log_;
+ VideoSendStream::Config config_;
+ SendDelayStats send_delay_stats_;
+ MaybeWorkerThread worker_queue_;
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> encoder_queue_;
+ SendStatisticsProxy stats_proxy_;
+ PacketRouter packet_router_;
+};
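
One pattern recurs throughout these tests: under GlobalSimulatedTimeController, posting a task does not run it inline, so each PostTask is followed by a zero-length time advance to drain the queues deterministically:

    // Posting alone only schedules the lambda; advancing simulated time by
    // zero runs every pending zero-delay task before the test continues.
    encoder_queue_->PostTask([&] { /* interact with vss_impl */ });
    time_controller_.AdvanceTime(TimeDelta::Zero());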
+
+TEST_F(VideoSendStreamImplTest, RegistersAsBitrateObserverOnStart) {
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _))
+ .WillOnce(Invoke(
+ [&](BitrateAllocatorObserver*, MediaStreamAllocationConfig config) {
+ EXPECT_EQ(config.min_bitrate_bps, 0u);
+ EXPECT_EQ(config.max_bitrate_bps, kDefaultInitialBitrateBps);
+ EXPECT_EQ(config.pad_up_bitrate_bps, 0u);
+ EXPECT_EQ(config.enforce_min_bitrate, !kSuspend);
+ EXPECT_EQ(config.bitrate_priority, kDefaultBitratePriority);
+ }));
+ worker_queue_.RunSynchronous([&] {
+ vss_impl->StartPerRtpStream({true});
+ EXPECT_CALL(bitrate_allocator_, RemoveObserver(vss_impl.get())).Times(1);
+ vss_impl->Stop();
+ });
+}
+
+TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChange) {
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri,
+ 1);
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+
+ // QVGA + VGA configuration matching defaults in
+ // media/engine/simulcast.cc.
+ VideoStream qvga_stream;
+ qvga_stream.width = 320;
+ qvga_stream.height = 180;
+ qvga_stream.max_framerate = 30;
+ qvga_stream.min_bitrate_bps = 30000;
+ qvga_stream.target_bitrate_bps = 150000;
+ qvga_stream.max_bitrate_bps = 200000;
+ qvga_stream.max_qp = 56;
+ qvga_stream.bitrate_priority = 1;
+
+ VideoStream vga_stream;
+ vga_stream.width = 640;
+ vga_stream.height = 360;
+ vga_stream.max_framerate = 30;
+ vga_stream.min_bitrate_bps = 150000;
+ vga_stream.target_bitrate_bps = 500000;
+ vga_stream.max_bitrate_bps = 700000;
+ vga_stream.max_qp = 56;
+ vga_stream.bitrate_priority = 1;
+
+ int min_transmit_bitrate_bps = 30000;
+
+ config_.rtp.ssrcs.emplace_back(1);
+ config_.rtp.ssrcs.emplace_back(2);
+
+ EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _))
+ .WillRepeatedly(Invoke(
+ [&](BitrateAllocatorObserver*, MediaStreamAllocationConfig config) {
+ EXPECT_TRUE(worker_queue_.IsCurrent());
+ EXPECT_EQ(config.min_bitrate_bps,
+ static_cast<uint32_t>(min_transmit_bitrate_bps));
+ EXPECT_EQ(config.max_bitrate_bps,
+ static_cast<uint32_t>(qvga_stream.max_bitrate_bps +
+ vga_stream.max_bitrate_bps));
+ if (config.pad_up_bitrate_bps != 0) {
+ EXPECT_EQ(config.pad_up_bitrate_bps,
+ static_cast<uint32_t>(qvga_stream.target_bitrate_bps +
+ vga_stream.min_bitrate_bps));
+ }
+ EXPECT_EQ(config.enforce_min_bitrate, !kSuspend);
+ }));
+
+ encoder_queue_->PostTask([&] {
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{qvga_stream, vga_stream}, false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ min_transmit_bitrate_bps);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChangeWithAlr) {
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri,
+ 1);
+ config_.periodic_alr_bandwidth_probing = true;
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+
+ // Simulcast screenshare.
+ VideoStream low_stream;
+ low_stream.width = 1920;
+ low_stream.height = 1080;
+ low_stream.max_framerate = 5;
+ low_stream.min_bitrate_bps = 30000;
+ low_stream.target_bitrate_bps = 200000;
+ low_stream.max_bitrate_bps = 1000000;
+ low_stream.num_temporal_layers = 2;
+ low_stream.max_qp = 56;
+ low_stream.bitrate_priority = 1;
+
+ VideoStream high_stream;
+ high_stream.width = 1920;
+ high_stream.height = 1080;
+ high_stream.max_framerate = 30;
+ high_stream.min_bitrate_bps = 60000;
+ high_stream.target_bitrate_bps = 1250000;
+ high_stream.max_bitrate_bps = 1250000;
+ high_stream.num_temporal_layers = 2;
+ high_stream.max_qp = 56;
+ high_stream.bitrate_priority = 1;
+
+ // With ALR probing, this will be the padding target instead of
+ // low_stream.target_bitrate_bps + high_stream.min_bitrate_bps.
+ int min_transmit_bitrate_bps = 400000;
+
+ config_.rtp.ssrcs.emplace_back(1);
+ config_.rtp.ssrcs.emplace_back(2);
+
+ EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _))
+ .WillRepeatedly(Invoke(
+ [&](BitrateAllocatorObserver*, MediaStreamAllocationConfig config) {
+ EXPECT_TRUE(worker_queue_.IsCurrent());
+ EXPECT_EQ(config.min_bitrate_bps,
+ static_cast<uint32_t>(low_stream.min_bitrate_bps));
+ EXPECT_EQ(config.max_bitrate_bps,
+ static_cast<uint32_t>(low_stream.max_bitrate_bps +
+ high_stream.max_bitrate_bps));
+ if (config.pad_up_bitrate_bps != 0) {
+ EXPECT_EQ(config.pad_up_bitrate_bps,
+ static_cast<uint32_t>(min_transmit_bitrate_bps));
+ }
+ EXPECT_EQ(config.enforce_min_bitrate, !kSuspend);
+ }));
+ encoder_queue_->PostTask([&] {
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{low_stream, high_stream}, false,
+ VideoEncoderConfig::ContentType::kScreen, min_transmit_bitrate_bps);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest,
+ UpdatesObserverOnConfigurationChangeWithSimulcastVideoHysteresis) {
+ test::ScopedKeyValueConfig hysteresis_experiment(
+ field_trials_, "WebRTC-VideoRateControl/video_hysteresis:1.25/");
+
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+ // 2-layer video simulcast.
+ VideoStream low_stream;
+ low_stream.width = 320;
+ low_stream.height = 240;
+ low_stream.max_framerate = 30;
+ low_stream.min_bitrate_bps = 30000;
+ low_stream.target_bitrate_bps = 100000;
+ low_stream.max_bitrate_bps = 200000;
+ low_stream.max_qp = 56;
+ low_stream.bitrate_priority = 1;
+
+ VideoStream high_stream;
+ high_stream.width = 640;
+ high_stream.height = 480;
+ high_stream.max_framerate = 30;
+ high_stream.min_bitrate_bps = 150000;
+ high_stream.target_bitrate_bps = 500000;
+ high_stream.max_bitrate_bps = 750000;
+ high_stream.max_qp = 56;
+ high_stream.bitrate_priority = 1;
+
+ config_.rtp.ssrcs.emplace_back(1);
+ config_.rtp.ssrcs.emplace_back(2);
+
+ EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _))
+ .WillRepeatedly(Invoke([&](BitrateAllocatorObserver*,
+ MediaStreamAllocationConfig config) {
+ EXPECT_TRUE(worker_queue_.IsCurrent());
+ EXPECT_EQ(config.min_bitrate_bps,
+ static_cast<uint32_t>(low_stream.min_bitrate_bps));
+ EXPECT_EQ(config.max_bitrate_bps,
+ static_cast<uint32_t>(low_stream.max_bitrate_bps +
+ high_stream.max_bitrate_bps));
+ if (config.pad_up_bitrate_bps != 0) {
+ EXPECT_EQ(config.pad_up_bitrate_bps,
+ static_cast<uint32_t>(low_stream.target_bitrate_bps +
+ 1.25 * high_stream.min_bitrate_bps));
+ }
+ }));
+
+ encoder_queue_->PostTask([&] {
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{low_stream, high_stream}, false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ /*min_transmit_bitrate_bps=*/0);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest, SetsScreensharePacingFactorWithFeedback) {
+ test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString());
+
+ constexpr int kId = 1;
+ config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri,
+ kId);
+ EXPECT_CALL(transport_controller_,
+ SetPacingFactor(kAlrProbingExperimentPaceMultiplier))
+ .Times(1);
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+ worker_queue_.RunSynchronous([&] {
+ vss_impl->StartPerRtpStream({true});
+ vss_impl->Stop();
+ });
+}
+
+TEST_F(VideoSendStreamImplTest, DoesNotSetPacingFactorWithoutFeedback) {
+ test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString());
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+ worker_queue_.RunSynchronous([&] {
+ EXPECT_CALL(transport_controller_, SetPacingFactor(_)).Times(0);
+ vss_impl->StartPerRtpStream({true});
+ vss_impl->Stop();
+ });
+}
+
+TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) {
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+
+ EXPECT_CALL(transport_controller_, SetPacingFactor(_)).Times(0);
+ VideoStreamEncoderInterface::EncoderSink* const sink =
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get());
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+ // Populate a test instance of video bitrate allocation.
+ VideoBitrateAllocation alloc;
+ alloc.SetBitrate(0, 0, 10000);
+ alloc.SetBitrate(0, 1, 20000);
+ alloc.SetBitrate(1, 0, 30000);
+ alloc.SetBitrate(1, 1, 40000);
+
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(0);
+ encoder_queue_->PostTask([&] {
+    // The encoder starts out paused; don't forward the allocation.
+
+ sink->OnBitrateAllocationUpdated(alloc);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ worker_queue_.RunSynchronous([&] {
+    // Unpause the encoder; the allocation should be passed through.
+ const uint32_t kBitrateBps = 100000;
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillOnce(Return(kBitrateBps));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(kBitrateBps));
+ });
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(1);
+ encoder_queue_->PostTask([&] { sink->OnBitrateAllocationUpdated(alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ worker_queue_.RunSynchronous([&] {
+ // Pause encoder again, and block allocations.
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillOnce(Return(0));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(0));
+ });
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(0);
+ encoder_queue_->PostTask([&] { sink->OnBitrateAllocationUpdated(alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) {
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+ worker_queue_.RunSynchronous([&] {
+ vss_impl->StartPerRtpStream({true});
+    // Unpause the encoder to allow allocations to be passed through.
+ const uint32_t kBitrateBps = 100000;
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillOnce(Return(kBitrateBps));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(kBitrateBps));
+ });
+ VideoStreamEncoderInterface::EncoderSink* const sink =
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get());
+
+ // Populate a test instance of video bitrate allocation.
+ VideoBitrateAllocation alloc;
+ alloc.SetBitrate(0, 0, 10000);
+ alloc.SetBitrate(0, 1, 20000);
+ alloc.SetBitrate(1, 0, 30000);
+ alloc.SetBitrate(1, 1, 40000);
+
+ // Initial value.
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(1);
+ encoder_queue_->PostTask([&] { sink->OnBitrateAllocationUpdated(alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ VideoBitrateAllocation updated_alloc = alloc;
+  // Needs a 10% increase in bitrate to trigger an immediate forward.
+ const uint32_t base_layer_min_update_bitrate_bps =
+ alloc.GetBitrate(0, 0) + alloc.get_sum_bps() / 10;
+
+ // Too small increase, don't forward.
+ updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps - 1);
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(_)).Times(0);
+ encoder_queue_->PostTask(
+ [&] { sink->OnBitrateAllocationUpdated(updated_alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Large enough increase, do forward.
+ updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps);
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc))
+ .Times(1);
+ encoder_queue_->PostTask(
+ [&] { sink->OnBitrateAllocationUpdated(updated_alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+  // This is now a decrease compared to the last forwarded allocation;
+  // forward it immediately.
+ updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps - 1);
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc))
+ .Times(1);
+ encoder_queue_->PostTask(
+ [&] { sink->OnBitrateAllocationUpdated(updated_alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
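
The throttle window in the test above works out as follows:

    // initial sum           = 10000 + 20000 + 30000 + 40000 = 100000 bps
    // similarity window     = [100000, 110000) bps  (sum * 110 / 100)
    // forward threshold     = GetBitrate(0, 0) + sum / 10 = 20000 bps
    // layer (0,0) -> 19999  => new sum 109999, inside window  -> throttled
    // layer (0,0) -> 20000  => new sum 110000, outside window -> forwarded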
+
+TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) {
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+
+ worker_queue_.RunSynchronous([&] {
+ vss_impl->StartPerRtpStream({true});
+    // Unpause the encoder to allow allocations to be passed through.
+ const uint32_t kBitrateBps = 100000;
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillOnce(Return(kBitrateBps));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(kBitrateBps));
+ });
+ VideoStreamEncoderInterface::EncoderSink* const sink =
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get());
+
+ // Populate a test instance of video bitrate allocation.
+ VideoBitrateAllocation alloc;
+ alloc.SetBitrate(0, 0, 10000);
+ alloc.SetBitrate(0, 1, 20000);
+ alloc.SetBitrate(1, 0, 30000);
+ alloc.SetBitrate(1, 1, 40000);
+
+ // Initial value.
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(1);
+ sink->OnBitrateAllocationUpdated(alloc);
+
+  // Move some bitrate from one layer to a new one, but keep the sum the
+  // same. Since the layout has changed, the update is forwarded immediately.
+ VideoBitrateAllocation updated_alloc = alloc;
+ updated_alloc.SetBitrate(2, 0, 10000);
+ updated_alloc.SetBitrate(1, 1, alloc.GetBitrate(1, 1) - 10000);
+ EXPECT_EQ(alloc.get_sum_bps(), updated_alloc.get_sum_bps());
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc))
+ .Times(1);
+ encoder_queue_->PostTask(
+ [&] { sink->OnBitrateAllocationUpdated(updated_alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) {
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kScreen);
+ worker_queue_.RunSynchronous([&] {
+ vss_impl->StartPerRtpStream({true});
+ const uint32_t kBitrateBps = 100000;
+    // Unpause the encoder to allow allocations to be passed through.
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillRepeatedly(Return(kBitrateBps));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(kBitrateBps));
+ });
+ VideoStreamEncoderInterface::EncoderSink* const sink =
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get());
+
+ // Populate a test instance of video bitrate allocation.
+ VideoBitrateAllocation alloc;
+
+ alloc.SetBitrate(0, 0, 10000);
+ alloc.SetBitrate(0, 1, 20000);
+ alloc.SetBitrate(1, 0, 30000);
+ alloc.SetBitrate(1, 1, 40000);
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_specific;
+ EXPECT_CALL(rtp_video_sender_, OnEncodedImage)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK)));
+ // Max time we will throttle similar video bitrate allocations.
+ static constexpr int64_t kMaxVbaThrottleTimeMs = 500;
+
+ {
+ // Initial value.
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(1);
+ encoder_queue_->PostTask([&] { sink->OnBitrateAllocationUpdated(alloc); });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ {
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(0);
+ encoder_queue_->PostTask([&] {
+      // Sending the same allocation again; this one should be throttled.
+ sink->OnBitrateAllocationUpdated(alloc);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(kMaxVbaThrottleTimeMs));
+ {
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(1);
+ encoder_queue_->PostTask([&] {
+      // Sending a similar allocation again after the timeout; it should be
+      // forwarded.
+ sink->OnBitrateAllocationUpdated(alloc);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ {
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(0);
+ encoder_queue_->PostTask([&] {
+      // Sending a similar allocation again before the timeout; throttle it.
+ sink->OnBitrateAllocationUpdated(alloc);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ {
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(0);
+ encoder_queue_->PostTask([&] {
+ // Send encoded image, should be a noop.
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnEncodedImage(encoded_image, &codec_specific);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ {
+    // Advance time and send an encoded image; this should wake up and send
+    // the cached bitrate allocation.
+ time_controller_.AdvanceTime(TimeDelta::Millis(kMaxVbaThrottleTimeMs));
+
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(1);
+ encoder_queue_->PostTask([&] {
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnEncodedImage(encoded_image, &codec_specific);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ {
+    // Advance time and send an encoded image; there should be no cached
+    // allocation to send.
+ time_controller_.AdvanceTime(TimeDelta::Millis(kMaxVbaThrottleTimeMs));
+ EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)).Times(0);
+ encoder_queue_->PostTask([&] {
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnEncodedImage(encoded_image, &codec_specific);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) {
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri,
+ 1);
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+ VideoStream qvga_stream;
+ qvga_stream.width = 320;
+ qvga_stream.height = 180;
+ qvga_stream.max_framerate = 30;
+ qvga_stream.min_bitrate_bps = 30000;
+ qvga_stream.target_bitrate_bps = 150000;
+ qvga_stream.max_bitrate_bps = 200000;
+ qvga_stream.max_qp = 56;
+ qvga_stream.bitrate_priority = 1;
+
+ int min_transmit_bitrate_bps = 30000;
+
+ config_.rtp.ssrcs.emplace_back(1);
+
+ encoder_queue_->PostTask([&] {
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{qvga_stream}, false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ min_transmit_bitrate_bps);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ worker_queue_.RunSynchronous([&] {
+ const DataRate network_constrained_rate =
+ DataRate::BitsPerSec(qvga_stream.target_bitrate_bps);
+ BitrateAllocationUpdate update;
+ update.target_bitrate = network_constrained_rate;
+ update.stable_target_bitrate = network_constrained_rate;
+ update.round_trip_time = TimeDelta::Millis(1);
+ EXPECT_CALL(rtp_video_sender_, OnBitrateUpdated(update, _));
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .WillOnce(Return(network_constrained_rate.bps()));
+ EXPECT_CALL(
+ video_stream_encoder_,
+ OnBitrateUpdated(network_constrained_rate, network_constrained_rate,
+ network_constrained_rate, 0, _, 0));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(update);
+
+ // Test allocation where the link allocation is larger than the
+ // target, meaning we have some headroom on the link.
+ const DataRate qvga_max_bitrate =
+ DataRate::BitsPerSec(qvga_stream.max_bitrate_bps);
+ const DataRate headroom = DataRate::BitsPerSec(50000);
+ const DataRate rate_with_headroom = qvga_max_bitrate + headroom;
+ update.target_bitrate = rate_with_headroom;
+ update.stable_target_bitrate = rate_with_headroom;
+ EXPECT_CALL(rtp_video_sender_, OnBitrateUpdated(update, _));
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .WillOnce(Return(rate_with_headroom.bps()));
+ EXPECT_CALL(video_stream_encoder_,
+ OnBitrateUpdated(qvga_max_bitrate, qvga_max_bitrate,
+ rate_with_headroom, 0, _, 0));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(update);
+
+      // Add protection bitrate to the mix; it should be subtracted from the
+      // headroom.
+ const uint32_t protection_bitrate_bps = 10000;
+ EXPECT_CALL(rtp_video_sender_, GetProtectionBitrateBps())
+ .WillOnce(Return(protection_bitrate_bps));
+
+ EXPECT_CALL(rtp_video_sender_, OnBitrateUpdated(update, _));
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .WillOnce(Return(rate_with_headroom.bps()));
+ const DataRate headroom_minus_protection =
+ rate_with_headroom - DataRate::BitsPerSec(protection_bitrate_bps);
+ EXPECT_CALL(video_stream_encoder_,
+ OnBitrateUpdated(qvga_max_bitrate, qvga_max_bitrate,
+ headroom_minus_protection, 0, _, 0));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(update);
+
+      // The protection bitrate exceeds the headroom; the link allocation
+      // should be capped to the target bitrate.
+ EXPECT_CALL(rtp_video_sender_, GetProtectionBitrateBps())
+ .WillOnce(Return(headroom.bps() + 1000));
+ EXPECT_CALL(rtp_video_sender_, OnBitrateUpdated(update, _));
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .WillOnce(Return(rate_with_headroom.bps()));
+ EXPECT_CALL(video_stream_encoder_,
+ OnBitrateUpdated(qvga_max_bitrate, qvga_max_bitrate,
+ qvga_max_bitrate, 0, _, 0));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(update);
+
+ // Set rates to zero on stop.
+ EXPECT_CALL(video_stream_encoder_,
+ OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(),
+ DataRate::Zero(), 0, 0, 0));
+ vss_impl->Stop();
+ });
+}
+
+TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) {
+ int padding_bitrate = 0;
+ std::unique_ptr<VideoSendStreamImpl> vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+
+ // Capture padding bitrate for testing.
+ EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _))
+ .WillRepeatedly(Invoke(
+ [&](BitrateAllocatorObserver*, MediaStreamAllocationConfig config) {
+ padding_bitrate = config.pad_up_bitrate_bps;
+ }));
+ // If observer is removed, no padding will be sent.
+ EXPECT_CALL(bitrate_allocator_, RemoveObserver(vss_impl.get()))
+ .WillRepeatedly(
+ Invoke([&](BitrateAllocatorObserver*) { padding_bitrate = 0; }));
+
+ EXPECT_CALL(rtp_video_sender_, OnEncodedImage)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK)));
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri,
+ 1);
+ VideoStream qvga_stream;
+ qvga_stream.width = 320;
+ qvga_stream.height = 180;
+ qvga_stream.max_framerate = 30;
+ qvga_stream.min_bitrate_bps = 30000;
+ qvga_stream.target_bitrate_bps = 150000;
+ qvga_stream.max_bitrate_bps = 200000;
+ qvga_stream.max_qp = 56;
+ qvga_stream.bitrate_priority = 1;
+
+ int min_transmit_bitrate_bps = 30000;
+
+ config_.rtp.ssrcs.emplace_back(1);
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+ // Starts without padding.
+ EXPECT_EQ(0, padding_bitrate);
+ encoder_queue_->PostTask([&] {
+ // Reconfigure e.g. due to a fake frame.
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{qvga_stream}, false,
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ min_transmit_bitrate_bps);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+  // Still no padding because no actual frames were passed; only a
+  // reconfiguration happened.
+ EXPECT_EQ(0, padding_bitrate);
+
+ worker_queue_.RunSynchronous([&] {
+ // Unpause encoder.
+ const uint32_t kBitrateBps = 100000;
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillOnce(Return(kBitrateBps));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(kBitrateBps));
+ });
+
+ encoder_queue_->PostTask([&] {
+ // A frame is encoded.
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_specific;
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnEncodedImage(encoded_image, &codec_specific);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+  // Padding is enabled only after an actual frame has been encoded.
+ EXPECT_GT(padding_bitrate, 0);
+
+ time_controller_.AdvanceTime(TimeDelta::Seconds(5));
+  // Since no frames have been sent in the last 5 seconds, no padding should
+  // be sent.
+ EXPECT_EQ(0, padding_bitrate);
+ testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+}
+
+TEST_F(VideoSendStreamImplTest, KeepAliveOnDroppedFrame) {
+ std::unique_ptr<VideoSendStreamImpl> vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ VideoEncoderConfig::ContentType::kRealtimeVideo);
+ EXPECT_CALL(bitrate_allocator_, RemoveObserver(vss_impl.get())).Times(0);
+ worker_queue_.RunSynchronous([&] {
+ vss_impl->StartPerRtpStream({true});
+ const uint32_t kBitrateBps = 100000;
+ EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
+ .Times(1)
+ .WillOnce(Return(kBitrateBps));
+ static_cast<BitrateAllocatorObserver*>(vss_impl.get())
+ ->OnBitrateUpdated(CreateAllocation(kBitrateBps));
+ });
+ encoder_queue_->PostTask([&] {
+    // A dropped frame counts as encoder activity and keeps the stream from
+    // being deregistered.
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnDroppedFrame(EncodedImageCallback::DropReason::kDroppedByEncoder);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Seconds(2));
+ worker_queue_.RunSynchronous([&] {
+ testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+ vss_impl->Stop();
+ });
+}
+
+TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvc) {
+ struct TestConfig {
+ bool screenshare = false;
+ bool alr = false;
+ int min_padding_bitrate_bps = 0;
+ };
+
+ std::vector<TestConfig> test_variants;
+ for (bool screenshare : {false, true}) {
+ for (bool alr : {false, true}) {
+ for (int min_padding : {0, 400000}) {
+ test_variants.push_back({screenshare, alr, min_padding});
+ }
+ }
+ }
+
+ for (const TestConfig& test_config : test_variants) {
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ config_.rtp.extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri, 1);
+ config_.periodic_alr_bandwidth_probing = test_config.alr;
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ test_config.screenshare
+ ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo);
+
+ worker_queue_.RunSynchronous([&] { vss_impl->StartPerRtpStream({true}); });
+
+    // SVC configuration.
+ VideoStream stream;
+ stream.width = 1920;
+ stream.height = 1080;
+ stream.max_framerate = 30;
+ stream.min_bitrate_bps = 60000;
+ stream.target_bitrate_bps = 6000000;
+ stream.max_bitrate_bps = 1250000;
+ stream.num_temporal_layers = 2;
+ stream.max_qp = 56;
+ stream.bitrate_priority = 1;
+
+ config_.rtp.ssrcs.emplace_back(1);
+ config_.rtp.ssrcs.emplace_back(2);
+
+ EXPECT_CALL(
+ bitrate_allocator_,
+ AddObserver(
+ vss_impl.get(),
+ AllOf(Field(&MediaStreamAllocationConfig::min_bitrate_bps,
+ static_cast<uint32_t>(stream.min_bitrate_bps)),
+ Field(&MediaStreamAllocationConfig::max_bitrate_bps,
+ static_cast<uint32_t>(stream.max_bitrate_bps)),
+ // Stream not yet active - no padding.
+ Field(&MediaStreamAllocationConfig::pad_up_bitrate_bps, 0u),
+ Field(&MediaStreamAllocationConfig::enforce_min_bitrate,
+ !kSuspend))));
+ encoder_queue_->PostTask([&] {
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{stream}, true,
+ test_config.screenshare
+ ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo,
+ test_config.min_padding_bitrate_bps);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+
+    // Simulate an encoded image; this will turn the stream active and
+    // enable padding.
+ EXPECT_CALL(rtp_video_sender_, OnEncodedImage)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK)));
+ // Screensharing implicitly forces ALR.
+ const bool using_alr = test_config.alr || test_config.screenshare;
+    // If ALR is used, we pad only up to the min bitrate, since ramp-up is
+    // handled by probing. Otherwise the padding target is derived from
+    // target_bitrate (scaled by the hysteresis factor below).
+ int expected_padding =
+ using_alr ? stream.min_bitrate_bps
+ : static_cast<int>(stream.target_bitrate_bps *
+ (test_config.screenshare ? 1.35 : 1.2));
+ // Min padding bitrate may override padding target.
+ expected_padding =
+ std::max(expected_padding, test_config.min_padding_bitrate_bps);
+ EXPECT_CALL(
+ bitrate_allocator_,
+ AddObserver(
+ vss_impl.get(),
+ AllOf(Field(&MediaStreamAllocationConfig::min_bitrate_bps,
+ static_cast<uint32_t>(stream.min_bitrate_bps)),
+ Field(&MediaStreamAllocationConfig::max_bitrate_bps,
+ static_cast<uint32_t>(stream.max_bitrate_bps)),
+                // Stream now active: min bitrate is used as the padding
+                // target when ALR is active.
+ Field(&MediaStreamAllocationConfig::pad_up_bitrate_bps,
+ expected_padding),
+ Field(&MediaStreamAllocationConfig::enforce_min_bitrate,
+ !kSuspend))));
+ encoder_queue_->PostTask([&] {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_specific;
+
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnEncodedImage(encoded_image, &codec_specific);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ ::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+
+ worker_queue_.RunSynchronous([&] { vss_impl->Stop(); });
+ }
+}
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_send_stream_tests.cc b/third_party/libwebrtc/video/video_send_stream_tests.cc
new file mode 100644
index 0000000000..f0563569ee
--- /dev/null
+++ b/third_party/libwebrtc/video/video_send_stream_tests.cc
@@ -0,0 +1,4289 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <algorithm> // max
+#include <memory>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/metrics/metric.h"
+#include "api/test/simulated_network.h"
+#include "api/units/time_delta.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/call.h"
+#include "call/fake_network_pipe.h"
+#include "call/rtp_transport_controller_send.h"
+#include "call/simulated_network.h"
+#include "call/video_send_stream.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "media/engine/webrtc_video_engine.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/svc/create_scalability_structure.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "modules/video_coding/svc/scalable_video_controller.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/alr_experiment.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/unique_id_generator.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/configurable_frame_size_encoder.h"
+#include "test/fake_encoder.h"
+#include "test/fake_texture_frame.h"
+#include "test/frame_forwarder.h"
+#include "test/frame_generator_capturer.h"
+#include "test/frame_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/rtp_rtcp_observer.h"
+#include "test/video_encoder_proxy_factory.h"
+#include "video/config/encoder_stream_factory.h"
+#include "video/send_statistics_proxy.h"
+#include "video/transport_adapter.h"
+#include "video/video_send_stream.h"
+
+namespace webrtc {
+namespace test {
+class VideoSendStreamPeer {
+ public:
+ explicit VideoSendStreamPeer(webrtc::VideoSendStream* base_class_stream)
+ : internal_stream_(
+ static_cast<internal::VideoSendStream*>(base_class_stream)) {}
+ absl::optional<float> GetPacingFactorOverride() const {
+ return internal_stream_->GetPacingFactorOverride();
+ }
+
+ private:
+ internal::VideoSendStream const* const internal_stream_;
+};
+} // namespace test
+
+namespace {
+enum : int { // The first valid value is 1.
+ kAbsSendTimeExtensionId = 1,
+ kTimestampOffsetExtensionId,
+ kTransportSequenceNumberExtensionId,
+ kVideoContentTypeExtensionId,
+ kVideoRotationExtensionId,
+ kVideoTimingExtensionId,
+};
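+// With the RFC 8285 one-byte header format commonly used for these
+// extensions, valid IDs are 1 through 14, so the sequential values above stay
+// in range.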
+
+// Readability convenience enum for `WaitBitrateChanged()`.
+enum class WaitUntil : bool { kZero = false, kNonZero = true };
+
+constexpr int64_t kRtcpIntervalMs = 1000;
+
+enum VideoFormat {
+ kGeneric,
+ kVP8,
+};
+
+struct Vp9TestParams {
+ std::string scalability_mode;
+ uint8_t num_spatial_layers;
+ uint8_t num_temporal_layers;
+ InterLayerPredMode inter_layer_pred;
+};
+
+using ParameterizationType = std::tuple<Vp9TestParams, bool>;
+
+std::string ParamInfoToStr(
+ const testing::TestParamInfo<ParameterizationType>& info) {
+ rtc::StringBuilder sb;
+ sb << std::get<0>(info.param).scalability_mode << "_"
+ << (std::get<1>(info.param) ? "WithIdentifier" : "WithoutIdentifier");
+ return sb.str();
+}
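+// For example, a Vp9TestParams entry with scalability_mode "L1T2" and the
+// bool set yields the test name suffix "L1T2_WithIdentifier".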
+
+} // namespace
+
+class VideoSendStreamTest : public test::CallTest {
+ public:
+ VideoSendStreamTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ }
+
+ protected:
+ void TestNackRetransmission(uint32_t retransmit_ssrc,
+ uint8_t retransmit_payload_type);
+ void TestPacketFragmentationSize(VideoFormat format, bool with_fec);
+
+ void TestVp9NonFlexMode(const Vp9TestParams& params,
+ bool use_scalability_mode_identifier);
+
+ void TestRequestSourceRotateVideo(bool support_orientation_ext);
+
+ void TestTemporalLayers(VideoEncoderFactory* encoder_factory,
+ const std::string& payload_name,
+ const std::vector<int>& num_temporal_layers,
+ const std::vector<ScalabilityMode>& scalability_mode);
+};
+
+TEST_F(VideoSendStreamTest, CanStartStartedStream) {
+ SendTask(task_queue(), [this]() {
+ CreateSenderCall();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateVideoStreams();
+ GetVideoSendStream()->Start();
+ GetVideoSendStream()->Start();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, CanStopStoppedStream) {
+ SendTask(task_queue(), [this]() {
+ CreateSenderCall();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateVideoStreams();
+ GetVideoSendStream()->Stop();
+ GetVideoSendStream()->Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, SupportsCName) {
+ static std::string kCName = "PjQatC14dGfbVwGPUOA9IH7RlsFDbWl4AhXEiDsBizo=";
+ class CNameObserver : public test::SendTest {
+ public:
+ CNameObserver() : SendTest(kDefaultTimeout) {}
+
+ private:
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ if (parser.sdes()->num_packets() > 0) {
+ EXPECT_EQ(1u, parser.sdes()->chunks().size());
+ EXPECT_EQ(kCName, parser.sdes()->chunks()[0].cname);
+
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.c_name = kCName;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP with CNAME.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsAbsoluteSendTime) {
+ class AbsoluteSendTimeObserver : public test::SendTest {
+ public:
+ AbsoluteSendTimeObserver() : SendTest(kDefaultTimeout) {
+ extensions_.Register<AbsoluteSendTime>(kAbsSendTimeExtensionId);
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ uint32_t abs_send_time = 0;
+ EXPECT_FALSE(rtp_packet.HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(rtp_packet.GetExtension<AbsoluteSendTime>(&abs_send_time));
+ if (abs_send_time != 0) {
+        // Wait for at least one packet with a non-zero send time. The send
+        // time is a 24-bit value derived from the system clock, and it is
+        // valid for a packet to have a zero send time. To tell that apart
+        // from an unpopulated value, we wait for a packet with a non-zero
+        // send time.
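+        // For reference, AbsoluteSendTime is a 24-bit 6.18 fixed-point
+        // timestamp in seconds, so it wraps roughly every 64 seconds. A
+        // minimal sketch of the conversion from a millisecond clock (the
+        // library's actual helper may differ):
+        //   uint32_t AbsSendTime24Bits(int64_t now_ms) {
+        //     return ((now_ms << 18) / 1000) & 0x00FFFFFF;
+        //   }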
+ observation_complete_.Set();
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Got a packet with zero absoluteSendTime, waiting"
+ " for another packet...";
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+ }
+
+ private:
+ RtpHeaderExtensionMap extensions_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
+ static const int kEncodeDelayMs = 5;
+ class TransmissionTimeOffsetObserver : public test::SendTest {
+ public:
+ TransmissionTimeOffsetObserver()
+ : SendTest(kDefaultTimeout), encoder_factory_([]() {
+ return std::make_unique<test::DelayedEncoder>(
+ Clock::GetRealTimeClock(), kEncodeDelayMs);
+ }) {
+ extensions_.Register<TransmissionOffset>(kTimestampOffsetExtensionId);
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ int32_t toffset = 0;
+ EXPECT_TRUE(rtp_packet.GetExtension<TransmissionOffset>(&toffset));
+ EXPECT_FALSE(rtp_packet.HasExtension<AbsoluteSendTime>());
+ EXPECT_GT(toffset, 0);
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTimestampOffsetUri, kTimestampOffsetExtensionId));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
+ }
+
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ RtpHeaderExtensionMap extensions_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsTransportWideSequenceNumbers) {
+ static const uint8_t kExtensionId = kTransportSequenceNumberExtensionId;
+ class TransportWideSequenceNumberObserver : public test::SendTest {
+ public:
+ TransportWideSequenceNumberObserver()
+ : SendTest(kDefaultTimeout), encoder_factory_([]() {
+ return std::make_unique<test::FakeEncoder>(
+ Clock::GetRealTimeClock());
+ }) {
+ extensions_.Register<TransportSequenceNumber>(kExtensionId);
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_TRUE(rtp_packet.HasExtension<TransportSequenceNumber>());
+ EXPECT_FALSE(rtp_packet.HasExtension<TransmissionOffset>());
+ EXPECT_FALSE(rtp_packet.HasExtension<AbsoluteSendTime>());
+
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
+ }
+
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ RtpHeaderExtensionMap extensions_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsVideoRotation) {
+ class VideoRotationObserver : public test::SendTest {
+ public:
+ VideoRotationObserver() : SendTest(kDefaultTimeout) {
+ extensions_.Register<VideoOrientation>(kVideoRotationExtensionId);
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ // Only the last packet of the frame is required to have the extension.
+ if (!rtp_packet.Marker())
+ return SEND_PACKET;
+ EXPECT_EQ(rtp_packet.GetExtension<VideoOrientation>(), kVideoRotation_90);
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kVideoRotationUri, kVideoRotationExtensionId));
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(kVideoRotation_90);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+ }
+
+ private:
+ RtpHeaderExtensionMap extensions_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsVideoContentType) {
+ class VideoContentTypeObserver : public test::SendTest {
+ public:
+ VideoContentTypeObserver()
+ : SendTest(kDefaultTimeout), first_frame_sent_(false) {
+ extensions_.Register<VideoContentTypeExtension>(
+ kVideoContentTypeExtensionId);
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+      // Only the last packet of the key frame must have the extension.
+ if (!rtp_packet.Marker() || first_frame_sent_)
+ return SEND_PACKET;
+      // The first marker bit seen means that the first frame has been sent.
+ first_frame_sent_ = true;
+ VideoContentType type;
+ EXPECT_TRUE(rtp_packet.GetExtension<VideoContentTypeExtension>(&type));
+ EXPECT_TRUE(videocontenttypehelpers::IsScreenshare(type));
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kVideoContentTypeUri, kVideoContentTypeExtensionId));
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+ }
+
+ private:
+ bool first_frame_sent_;
+ RtpHeaderExtensionMap extensions_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsVideoTimingFrames) {
+ class VideoTimingObserver : public test::SendTest {
+ public:
+ VideoTimingObserver()
+ : SendTest(kDefaultTimeout), first_frame_sent_(false) {
+ extensions_.Register<VideoTimingExtension>(kVideoTimingExtensionId);
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+      // Only the last packet of the frame must have the extension.
+ // Also don't check packets of the second frame if they happen to get
+ // through before the test terminates.
+ if (!rtp_packet.Marker() || first_frame_sent_)
+ return SEND_PACKET;
+ EXPECT_TRUE(rtp_packet.HasExtension<VideoTimingExtension>());
+ observation_complete_.Set();
+ first_frame_sent_ = true;
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoTimingUri, kVideoTimingExtensionId));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for timing frames.";
+ }
+
+ private:
+ RtpHeaderExtensionMap extensions_;
+ bool first_frame_sent_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+class FakeReceiveStatistics : public ReceiveStatisticsProvider {
+ public:
+ FakeReceiveStatistics(uint32_t send_ssrc,
+ uint32_t last_sequence_number,
+ uint32_t cumulative_lost,
+ uint8_t fraction_lost) {
+ stat_.SetMediaSsrc(send_ssrc);
+ stat_.SetExtHighestSeqNum(last_sequence_number);
+ stat_.SetCumulativeLost(cumulative_lost);
+ stat_.SetFractionLost(fraction_lost);
+ }
+
+ std::vector<rtcp::ReportBlock> RtcpReportBlocks(size_t max_blocks) override {
+ EXPECT_GE(max_blocks, 1u);
+ return {stat_};
+ }
+
+ private:
+ rtcp::ReportBlock stat_;
+};
+
+class UlpfecObserver : public test::EndToEndTest {
+ public:
+ // Some of the test cases are expected to time out.
+ // Use a shorter timeout window than the default one for those.
+ static constexpr TimeDelta kReducedTimeout = TimeDelta::Seconds(10);
+
+ UlpfecObserver(bool header_extensions_enabled,
+ bool use_nack,
+ bool expect_red,
+ bool expect_ulpfec,
+ const std::string& codec,
+ VideoEncoderFactory* encoder_factory)
+ : EndToEndTest(expect_ulpfec ? VideoSendStreamTest::kDefaultTimeout
+ : kReducedTimeout),
+ encoder_factory_(encoder_factory),
+ payload_name_(codec),
+ use_nack_(use_nack),
+ expect_red_(expect_red),
+ expect_ulpfec_(expect_ulpfec),
+ sent_media_(false),
+ sent_ulpfec_(false),
+ header_extensions_enabled_(header_extensions_enabled) {
+ extensions_.Register<AbsoluteSendTime>(kAbsSendTimeExtensionId);
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ int encapsulated_payload_type = -1;
+ if (rtp_packet.PayloadType() == VideoSendStreamTest::kRedPayloadType) {
+ EXPECT_TRUE(expect_red_);
+ encapsulated_payload_type = rtp_packet.payload()[0];
+ if (encapsulated_payload_type !=
+ VideoSendStreamTest::kFakeVideoSendPayloadType) {
+ EXPECT_EQ(VideoSendStreamTest::kUlpfecPayloadType,
+ encapsulated_payload_type);
+ }
+ } else {
+ EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
+ rtp_packet.PayloadType());
+ if (rtp_packet.payload_size() > 0) {
+ // Not padding-only, media received outside of RED.
+ EXPECT_FALSE(expect_red_);
+ sent_media_ = true;
+ }
+ }
+
+ if (header_extensions_enabled_) {
+ uint32_t abs_send_time;
+ EXPECT_TRUE(rtp_packet.GetExtension<AbsoluteSendTime>(&abs_send_time));
+ uint16_t transport_seq_num;
+ EXPECT_TRUE(
+ rtp_packet.GetExtension<TransportSequenceNumber>(&transport_seq_num));
+ if (!first_packet_) {
+ uint32_t kHalf24BitsSpace = 0xFFFFFF / 2;
+ if (abs_send_time <= kHalf24BitsSpace &&
+ prev_abs_send_time_ > kHalf24BitsSpace) {
+ // 24 bits wrap.
+ EXPECT_GT(prev_abs_send_time_, abs_send_time);
+ } else {
+ EXPECT_GE(abs_send_time, prev_abs_send_time_);
+ }
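+        // Example of the wrap heuristic above: prev_abs_send_time_ = 0xFFFFF0
+        // (top half of the 24-bit space) followed by abs_send_time = 0x000010
+        // (bottom half) is treated as a wrap rather than reordering, even
+        // though the new value is numerically smaller.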
+
+ uint16_t seq_num_diff = transport_seq_num - prev_transport_seq_num_;
+ EXPECT_EQ(1, seq_num_diff);
+ }
+ first_packet_ = false;
+ prev_abs_send_time_ = abs_send_time;
+ prev_transport_seq_num_ = transport_seq_num;
+ }
+
+ if (encapsulated_payload_type != -1) {
+ if (encapsulated_payload_type ==
+ VideoSendStreamTest::kUlpfecPayloadType) {
+ EXPECT_TRUE(expect_ulpfec_);
+ sent_ulpfec_ = true;
+ } else {
+ sent_media_ = true;
+ }
+ }
+
+ if (sent_media_ && sent_ulpfec_) {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ // At low RTT (< kLowRttNackMs) -> NACK only, no FEC.
+ // Configure some network delay.
+ const int kNetworkDelayMs = 100;
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return config;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (use_nack_) {
+ send_config->rtp.nack.rtp_history_ms =
+ (*receive_configs)[0].rtp.nack.rtp_history_ms =
+ VideoSendStreamTest::kNackRtpHistoryMs;
+ }
+ send_config->encoder_settings.encoder_factory = encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ send_config->rtp.ulpfec.red_payload_type =
+ VideoSendStreamTest::kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type =
+ VideoSendStreamTest::kUlpfecPayloadType;
+ if (!header_extensions_enabled_) {
+ send_config->rtp.extensions.clear();
+ } else {
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ }
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ encoder_config->codec_type = PayloadStringToCodecType(payload_name_);
+ (*receive_configs)[0].rtp.red_payload_type =
+ send_config->rtp.ulpfec.red_payload_type;
+ (*receive_configs)[0].rtp.ulpfec_payload_type =
+ send_config->rtp.ulpfec.ulpfec_payload_type;
+ }
+
+ void PerformTest() override {
+ EXPECT_EQ(expect_ulpfec_, Wait())
+ << "Timed out waiting for ULPFEC and/or media packets.";
+ }
+
+ VideoEncoderFactory* encoder_factory_;
+ RtpHeaderExtensionMap extensions_;
+ const std::string payload_name_;
+ const bool use_nack_;
+ const bool expect_red_;
+ const bool expect_ulpfec_;
+ bool sent_media_;
+ bool sent_ulpfec_;
+ const bool header_extensions_enabled_;
+ bool first_packet_ = true;
+ uint32_t prev_abs_send_time_ = 0;
+ uint16_t prev_transport_seq_num_ = 0;
+};
+
+TEST_F(VideoSendStreamTest, SupportsUlpfecWithExtensions) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ UlpfecObserver test(true, false, true, true, "VP8", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsUlpfecWithoutExtensions) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ UlpfecObserver test(false, false, true, true, "VP8", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+class VideoSendStreamWithoutUlpfecTest : public test::CallTest {
+ protected:
+ VideoSendStreamWithoutUlpfecTest()
+ : field_trial_(field_trials_, "WebRTC-DisableUlpFecExperiment/Enabled/") {
+ }
+
+ test::ScopedKeyValueConfig field_trial_;
+};
+
+TEST_F(VideoSendStreamWithoutUlpfecTest, NoUlpfecIfDisabledThroughFieldTrial) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ UlpfecObserver test(false, false, false, false, "VP8", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+// The FEC scheme used is not efficient for H264, so we should not use RED/FEC
+// since we would still have to re-request the lost packets, effectively
+// wasting bandwidth: the receiver has to wait for the retransmissions to
+// determine that the received state is actually decodable.
+TEST_F(VideoSendStreamTest, DoesNotUtilizeUlpfecForH264WithNackEnabled) {
+ test::FunctionVideoEncoderFactory encoder_factory([]() {
+ return std::make_unique<test::FakeH264Encoder>(Clock::GetRealTimeClock());
+ });
+ UlpfecObserver test(false, true, false, false, "H264", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+// Without retransmissions, FEC for H264 is fine.
+TEST_F(VideoSendStreamTest, DoesUtilizeUlpfecForH264WithoutNackEnabled) {
+ test::FunctionVideoEncoderFactory encoder_factory([]() {
+ return std::make_unique<test::FakeH264Encoder>(Clock::GetRealTimeClock());
+ });
+ UlpfecObserver test(false, false, true, true, "H264", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, DoesUtilizeUlpfecForVp8WithNackEnabled) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ UlpfecObserver test(false, true, true, true, "VP8", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+#if defined(RTC_ENABLE_VP9)
+TEST_F(VideoSendStreamTest, DoesUtilizeUlpfecForVp9WithNackEnabled) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ UlpfecObserver test(false, true, true, true, "VP9", &encoder_factory);
+ RunBaseTest(&test);
+}
+#endif // defined(RTC_ENABLE_VP9)
+
+TEST_F(VideoSendStreamTest, SupportsUlpfecWithMultithreadedH264) {
+ std::unique_ptr<TaskQueueFactory> task_queue_factory =
+ CreateDefaultTaskQueueFactory();
+ test::FunctionVideoEncoderFactory encoder_factory([&]() {
+ return std::make_unique<test::MultithreadedFakeH264Encoder>(
+ Clock::GetRealTimeClock(), task_queue_factory.get());
+ });
+ UlpfecObserver test(false, false, true, true, "H264", &encoder_factory);
+ RunBaseTest(&test);
+}
+
+// TODO(brandtr): Move these FlexFEC tests when we have created
+// FlexfecSendStream.
+class FlexfecObserver : public test::EndToEndTest {
+ public:
+ FlexfecObserver(bool header_extensions_enabled,
+ bool use_nack,
+ const std::string& codec,
+ VideoEncoderFactory* encoder_factory,
+ size_t num_video_streams)
+ : EndToEndTest(VideoSendStreamTest::kDefaultTimeout),
+ encoder_factory_(encoder_factory),
+ payload_name_(codec),
+ use_nack_(use_nack),
+ sent_media_(false),
+ sent_flexfec_(false),
+ header_extensions_enabled_(header_extensions_enabled),
+ num_video_streams_(num_video_streams) {
+ extensions_.Register<AbsoluteSendTime>(kAbsSendTimeExtensionId);
+ extensions_.Register<TransmissionOffset>(kTimestampOffsetExtensionId);
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
+ }
+
+ size_t GetNumFlexfecStreams() const override { return 1; }
+ size_t GetNumVideoStreams() const override { return num_video_streams_; }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ if (rtp_packet.PayloadType() == VideoSendStreamTest::kFlexfecPayloadType) {
+ EXPECT_EQ(VideoSendStreamTest::kFlexfecSendSsrc, rtp_packet.Ssrc());
+ sent_flexfec_ = true;
+ } else {
+ EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
+ rtp_packet.PayloadType());
+ EXPECT_THAT(::testing::make_tuple(VideoSendStreamTest::kVideoSendSsrcs,
+ num_video_streams_),
+ ::testing::Contains(rtp_packet.Ssrc()));
+ sent_media_ = true;
+ }
+
+ if (header_extensions_enabled_) {
+ EXPECT_TRUE(rtp_packet.HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(rtp_packet.HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(rtp_packet.HasExtension<TransportSequenceNumber>());
+ }
+
+ if (sent_media_ && sent_flexfec_) {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const {
+ // At low RTT (< kLowRttNackMs) -> NACK only, no FEC.
+ // Therefore we need some network delay.
+ const int kNetworkDelayMs = 100;
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return config;
+ }
+
+ BuiltInNetworkBehaviorConfig GetReceiveTransportConfig() const {
+ // We need the RTT to be >200 ms to send FEC and the network delay for the
+ // send transport is 100 ms, so add 100 ms (but no loss) on the return link.
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 0;
+ config.queue_delay_ms = 100;
+ return config;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (use_nack_) {
+ send_config->rtp.nack.rtp_history_ms =
+ (*receive_configs)[0].rtp.nack.rtp_history_ms =
+ VideoSendStreamTest::kNackRtpHistoryMs;
+ }
+ send_config->encoder_settings.encoder_factory = encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ if (header_extensions_enabled_) {
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTimestampOffsetUri, kTimestampOffsetExtensionId));
+ } else {
+ send_config->rtp.extensions.clear();
+ }
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ encoder_config->codec_type = PayloadStringToCodecType(payload_name_);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for FlexFEC and/or media packets.";
+ }
+
+ VideoEncoderFactory* encoder_factory_;
+ RtpHeaderExtensionMap extensions_;
+ const std::string payload_name_;
+ const bool use_nack_;
+ bool sent_media_;
+ bool sent_flexfec_;
+ const bool header_extensions_enabled_;
+ const size_t num_video_streams_;
+};
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ FlexfecObserver test(false, false, "VP8", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecSimulcastVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ FlexfecObserver test(false, false, "VP8", &encoder_factory, 2);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithNackVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ FlexfecObserver test(false, true, "VP8", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithRtpExtensionsVp8) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+ FlexfecObserver test(true, false, "VP8", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+#if defined(RTC_ENABLE_VP9)
+TEST_F(VideoSendStreamTest, SupportsFlexfecVp9) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ FlexfecObserver test(false, false, "VP9", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithNackVp9) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+ FlexfecObserver test(false, true, "VP9", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+#endif // defined(RTC_ENABLE_VP9)
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecH264) {
+ test::FunctionVideoEncoderFactory encoder_factory([]() {
+ return std::make_unique<test::FakeH264Encoder>(Clock::GetRealTimeClock());
+ });
+ FlexfecObserver test(false, false, "H264", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithNackH264) {
+ test::FunctionVideoEncoderFactory encoder_factory([]() {
+ return std::make_unique<test::FakeH264Encoder>(Clock::GetRealTimeClock());
+ });
+ FlexfecObserver test(false, true, "H264", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithMultithreadedH264) {
+ std::unique_ptr<TaskQueueFactory> task_queue_factory =
+ CreateDefaultTaskQueueFactory();
+ test::FunctionVideoEncoderFactory encoder_factory([&]() {
+ return std::make_unique<test::MultithreadedFakeH264Encoder>(
+ Clock::GetRealTimeClock(), task_queue_factory.get());
+ });
+
+ FlexfecObserver test(false, false, "H264", &encoder_factory, 1);
+ RunBaseTest(&test);
+}
+
+void VideoSendStreamTest::TestNackRetransmission(
+ uint32_t retransmit_ssrc,
+ uint8_t retransmit_payload_type) {
+ class NackObserver : public test::SendTest {
+ public:
+ explicit NackObserver(uint32_t retransmit_ssrc,
+ uint8_t retransmit_payload_type)
+ : SendTest(kDefaultTimeout),
+ send_count_(0),
+ retransmit_count_(0),
+ retransmit_ssrc_(retransmit_ssrc),
+ retransmit_payload_type_(retransmit_payload_type) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+    // NACK a batch of packets twice, at arbitrary points.
+ const int kNackedPacketsAtOnceCount = 3;
+ const int kRetransmitTarget = kNackedPacketsAtOnceCount * 2;
+
+ // Skip padding packets because they will never be retransmitted.
+ if (rtp_packet.payload_size() == 0) {
+ return SEND_PACKET;
+ }
+
+ ++send_count_;
+
+ // NACK packets at arbitrary points.
+ if (send_count_ % 25 == 0) {
+ RTCPSender::Configuration config;
+ config.clock = Clock::GetRealTimeClock();
+ config.outgoing_transport = transport_adapter_.get();
+ config.rtcp_report_interval = TimeDelta::Millis(kRtcpIntervalMs);
+ config.local_media_ssrc = kReceiverLocalVideoSsrc;
+ RTCPSender rtcp_sender(config);
+
+ rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
+
+ RTCPSender::FeedbackState feedback_state;
+ uint16_t nack_sequence_numbers[kNackedPacketsAtOnceCount];
+ int nack_count = 0;
+ for (uint16_t sequence_number :
+ sequence_numbers_pending_retransmission_) {
+ if (nack_count < kNackedPacketsAtOnceCount) {
+ nack_sequence_numbers[nack_count++] = sequence_number;
+ } else {
+ break;
+ }
+ }
+
+ EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpNack, nack_count,
+ nack_sequence_numbers));
+ }
+
+ uint16_t sequence_number = rtp_packet.SequenceNumber();
+ if (rtp_packet.Ssrc() == retransmit_ssrc_ &&
+ retransmit_ssrc_ != kVideoSendSsrcs[0]) {
+      // Not kVideoSendSsrcs[0], so assume this is an RTX packet and extract
+      // the original sequence number.
+ const uint8_t* rtx_header = rtp_packet.payload().data();
+ sequence_number = (rtx_header[0] << 8) + rtx_header[1];
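+      // For reference, RFC 4588 prepends the original sequence number (OSN)
+      // to the original media payload in an RTX packet, big-endian, which is
+      // why the first two payload bytes are read here:
+      //   |<- 2 bytes: OSN ->|<- original RTP payload ... ->|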
+ }
+
+ auto it = sequence_numbers_pending_retransmission_.find(sequence_number);
+ if (it == sequence_numbers_pending_retransmission_.end()) {
+      // Not currently pending retransmission. Add it to the retransmission
+      // queue if it is a media packet and the limit has not been reached.
+ if (rtp_packet.Ssrc() == kVideoSendSsrcs[0] &&
+ rtp_packet.payload_size() > 0 &&
+ retransmit_count_ +
+ sequence_numbers_pending_retransmission_.size() <
+ kRetransmitTarget) {
+ sequence_numbers_pending_retransmission_.insert(sequence_number);
+ return DROP_PACKET;
+ }
+ } else {
+ // Packet is a retransmission, remove it from queue and check if done.
+ sequence_numbers_pending_retransmission_.erase(it);
+ if (++retransmit_count_ == kRetransmitTarget) {
+ EXPECT_EQ(retransmit_ssrc_, rtp_packet.Ssrc());
+ EXPECT_EQ(retransmit_payload_type_, rtp_packet.PayloadType());
+ observation_complete_.Set();
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ transport_adapter_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ transport_adapter_->Enable();
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.rtx.payload_type = retransmit_payload_type_;
+ if (retransmit_ssrc_ != kVideoSendSsrcs[0])
+ send_config->rtp.rtx.ssrcs.push_back(retransmit_ssrc_);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for NACK retransmission.";
+ }
+
+ std::unique_ptr<internal::TransportAdapter> transport_adapter_;
+ int send_count_;
+ int retransmit_count_;
+ const uint32_t retransmit_ssrc_;
+ const uint8_t retransmit_payload_type_;
+ std::set<uint16_t> sequence_numbers_pending_retransmission_;
+ } test(retransmit_ssrc, retransmit_payload_type);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, RetransmitsNack) {
+ // Normal NACKs should use the send SSRC.
+ TestNackRetransmission(kVideoSendSsrcs[0], kFakeVideoSendPayloadType);
+}
+
+TEST_F(VideoSendStreamTest, RetransmitsNackOverRtx) {
+ // NACKs over RTX should use a separate SSRC.
+ TestNackRetransmission(kSendRtxSsrcs[0], kSendRtxPayloadType);
+}
+
+void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
+ bool with_fec) {
+ // Use a fake encoder to output a frame of every size in the range [90, 290],
+ // for each size making sure that the exact number of payload bytes received
+ // is correct and that packets are fragmented to respect max packet size.
+ static const size_t kMaxPacketSize = 128;
+ static const size_t start = 90;
+ static const size_t stop = 290;
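+  // Back-of-the-envelope check of the range above: a 290-byte frame must be
+  // split across at least 290 / 128 (rounded up) = 3 RTP packets, and in
+  // practice more, since each packet also spends bytes on the RTP header and
+  // payload-format overhead.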
+
+ // Observer that verifies that the expected number of packets and bytes
+ // arrive for each frame size, from start_size to stop_size.
+ class FrameFragmentationTest : public test::SendTest {
+ public:
+ FrameFragmentationTest(size_t max_packet_size,
+ size_t start_size,
+ size_t stop_size,
+ bool test_generic_packetization,
+ bool use_fec)
+ : SendTest(kLongTimeout),
+ encoder_(stop),
+ encoder_factory_(&encoder_),
+ max_packet_size_(max_packet_size),
+ stop_size_(stop_size),
+ test_generic_packetization_(test_generic_packetization),
+ use_fec_(use_fec),
+ packet_count_(0),
+ packets_lost_(0),
+ last_packet_count_(0),
+ last_packets_lost_(0),
+ accumulated_size_(0),
+ accumulated_payload_(0),
+ fec_packet_received_(false),
+ current_size_rtp_(start_size),
+ current_size_frame_(static_cast<int>(start_size)) {
+      // Fragmentation is required; this test doesn't make sense without it.
+ encoder_.SetFrameSize(start_size);
+ RTC_DCHECK_GT(stop_size, max_packet_size);
+ if (!test_generic_packetization_)
+ encoder_.SetCodecType(kVideoCodecVP8);
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t size) override {
+ size_t length = size;
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_LE(length, max_packet_size_);
+
+ if (use_fec_ && rtp_packet.payload_size() > 0) {
+ uint8_t payload_type = rtp_packet.payload()[0];
+ bool is_fec = rtp_packet.PayloadType() == kRedPayloadType &&
+ payload_type == kUlpfecPayloadType;
+ if (is_fec) {
+ fec_packet_received_ = true;
+ return SEND_PACKET;
+ }
+ }
+
+ accumulated_size_ += length;
+
+ if (use_fec_)
+ TriggerLossReport(rtp_packet);
+
+ if (test_generic_packetization_) {
+ size_t overhead = rtp_packet.headers_size() + rtp_packet.padding_size();
+ // Only remove payload header and RED header if the packet actually
+ // contains payload.
+ if (length > overhead) {
+ overhead += (1 /* Generic header */);
+ if (use_fec_)
+ overhead += 1; // RED for FEC header.
+ }
+ EXPECT_GE(length, overhead);
+ accumulated_payload_ += length - overhead;
+ }
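+      // Illustrative accounting (actual header sizes depend on the registered
+      // extensions): a 128-byte packet with a 20-byte RTP header, no padding,
+      // and FEC disabled has overhead 20 + 1 (generic header) = 21 bytes and
+      // contributes 107 bytes to accumulated_payload_.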
+
+ // Marker bit set indicates last packet of a frame.
+ if (rtp_packet.Marker()) {
+ if (use_fec_ && accumulated_payload_ == current_size_rtp_ - 1) {
+ // With FEC enabled, frame size is incremented asynchronously, so
+ // "old" frames one byte too small may arrive. Accept, but don't
+ // increase expected frame size.
+ accumulated_size_ = 0;
+ accumulated_payload_ = 0;
+ return SEND_PACKET;
+ }
+
+ EXPECT_GE(accumulated_size_, current_size_rtp_);
+ if (test_generic_packetization_) {
+ EXPECT_EQ(current_size_rtp_, accumulated_payload_);
+ }
+
+ // Last packet of frame; reset counters.
+ accumulated_size_ = 0;
+ accumulated_payload_ = 0;
+ if (current_size_rtp_ == stop_size_) {
+          // Done! (Don't increase the size again; more packets may arrive at
+          // stop_size.)
+ observation_complete_.Set();
+ } else {
+          // Increase the next expected frame size. If testing with FEC, wait
+          // until a FEC packet has been received for this frame size before
+          // proceeding, to ensure that redundancy packets don't exceed the
+          // size limit.
+ if (!use_fec_) {
+ ++current_size_rtp_;
+ } else if (fec_packet_received_) {
+ fec_packet_received_ = false;
+ ++current_size_rtp_;
+
+ MutexLock lock(&mutex_);
+ ++current_size_frame_;
+ }
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ void TriggerLossReport(const RtpPacket& rtp_packet) {
+ // Send lossy receive reports to trigger FEC enabling.
+ const int kLossPercent = 5;
+ if (++packet_count_ % (100 / kLossPercent) == 0) {
+ packets_lost_++;
+ int loss_delta = packets_lost_ - last_packets_lost_;
+ int packets_delta = packet_count_ - last_packet_count_;
+ last_packet_count_ = packet_count_;
+ last_packets_lost_ = packets_lost_;
+ uint8_t loss_ratio =
+ static_cast<uint8_t>(loss_delta * 255 / packets_delta);
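+        // With kLossPercent = 5, a report is sent every 20th packet with one
+        // newly lost packet, so loss_ratio = 1 * 255 / 20 = 12, i.e. roughly
+        // 5% in the RFC 3550 Q8 fraction-lost format.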
+ FakeReceiveStatistics lossy_receive_stats(
+ kVideoSendSsrcs[0], rtp_packet.SequenceNumber(),
+ packets_lost_, // Cumulative lost.
+ loss_ratio); // Loss percent.
+ RTCPSender::Configuration config;
+ config.clock = Clock::GetRealTimeClock();
+ config.receive_statistics = &lossy_receive_stats;
+ config.outgoing_transport = transport_adapter_.get();
+ config.rtcp_report_interval = TimeDelta::Millis(kRtcpIntervalMs);
+ config.local_media_ssrc = kVideoSendSsrcs[0];
+ RTCPSender rtcp_sender(config);
+
+ rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
+
+ RTCPSender::FeedbackState feedback_state;
+
+ EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
+ }
+ }
+
+ void UpdateConfiguration() {
+ MutexLock lock(&mutex_);
+ // Increase frame size for next encoded frame, in the context of the
+ // encoder thread.
+ if (!use_fec_ && current_size_frame_ < static_cast<int32_t>(stop_size_)) {
+ ++current_size_frame_;
+ }
+ encoder_.SetFrameSize(static_cast<size_t>(current_size_frame_));
+ }
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ const int kMinBitrateBps = 300000;
+ bitrate_config->min_bitrate_bps = kMinBitrateBps;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ transport_adapter_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ transport_adapter_->Enable();
+ if (use_fec_) {
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ }
+
+ if (!test_generic_packetization_)
+ send_config->rtp.payload_name = "VP8";
+
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.max_packet_size = kMaxPacketSize;
+ encoder_.RegisterPostEncodeCallback([this]() { UpdateConfiguration(); });
+
+ // Make sure there is at least one extension header, to make the RTP
+ // header larger than the base length of 12 bytes.
+ EXPECT_FALSE(send_config->rtp.extensions.empty());
+
+      // Setting the content type to screen disables frame dropping, which
+      // makes this easier.
+ EXPECT_EQ(1u, encoder_config->simulcast_layers.size());
+ encoder_config->simulcast_layers[0].num_temporal_layers = 2;
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while observing incoming RTP packets.";
+ }
+
+ std::unique_ptr<internal::TransportAdapter> transport_adapter_;
+ test::ConfigurableFrameSizeEncoder encoder_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+
+ const size_t max_packet_size_;
+ const size_t stop_size_;
+ const bool test_generic_packetization_;
+ const bool use_fec_;
+
+ uint32_t packet_count_;
+ uint32_t packets_lost_;
+ uint32_t last_packet_count_;
+ uint32_t last_packets_lost_;
+ size_t accumulated_size_;
+ size_t accumulated_payload_;
+ bool fec_packet_received_;
+
+ size_t current_size_rtp_;
+ Mutex mutex_;
+ int current_size_frame_ RTC_GUARDED_BY(mutex_);
+ };
+
+  // Don't auto-increment if FEC is used; keep sending the current frame size
+  // until a FEC packet has been received.
+ FrameFragmentationTest test(kMaxPacketSize, start, stop, format == kGeneric,
+ with_fec);
+
+ RunBaseTest(&test);
+}
+
+// TODO(sprang): Is there any way of speeding up these tests?
+TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSize) {
+ TestPacketFragmentationSize(kGeneric, false);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSizeWithFec) {
+ TestPacketFragmentationSize(kGeneric, true);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSize) {
+ TestPacketFragmentationSize(kVP8, false);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSizeWithFec) {
+ TestPacketFragmentationSize(kVP8, true);
+}
+
+// This test verifies that padding stops being sent after a while if the camera
+// stops producing video frames, and that padding resumes if the camera
+// restarts.
+TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
+ class NoPaddingWhenVideoIsMuted : public test::SendTest {
+ public:
+ NoPaddingWhenVideoIsMuted()
+ : SendTest(kDefaultTimeout),
+ clock_(Clock::GetRealTimeClock()),
+ capturer_(nullptr) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ last_packet_time_ms_ = clock_->TimeInMilliseconds();
+
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet, length);
+ const bool only_padding = rtp_packet.payload_size() == 0;
+
+ if (test_state_ == kBeforeStopCapture) {
+        // Packets are flowing; stop the camera.
+ capturer_->Stop();
+ test_state_ = kWaitingForPadding;
+ } else if (test_state_ == kWaitingForPadding && only_padding) {
+        // We're still getting padding after stopping the camera.
+ test_state_ = kWaitingForNoPackets;
+ } else if (test_state_ == kWaitingForMediaAfterCameraRestart &&
+ !only_padding) {
+        // Media packets are flowing again; stop the camera a second time.
+ capturer_->Stop();
+ test_state_ = kWaitingForPaddingAfterCameraStopsAgain;
+ } else if (test_state_ == kWaitingForPaddingAfterCameraStopsAgain &&
+ only_padding) {
+        // Padding is still flowing; the test is done.
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ const int kNoPacketsThresholdMs = 2000;
+ if (test_state_ == kWaitingForNoPackets &&
+ (last_packet_time_ms_ &&
+ clock_->TimeInMilliseconds() - last_packet_time_ms_.value() >
+ kNoPacketsThresholdMs)) {
+        // No packets seen for `kNoPacketsThresholdMs`; restart the camera.
+ capturer_->Start();
+ test_state_ = kWaitingForMediaAfterCameraRestart;
+ }
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Make sure padding is sent if encoder is not producing media.
+ encoder_config->min_transmit_bitrate_bps = 50000;
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ MutexLock lock(&mutex_);
+ capturer_ = frame_generator_capturer;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for RTP packets to stop being sent.";
+ }
+
+ enum TestState {
+ kBeforeStopCapture,
+ kWaitingForPadding,
+ kWaitingForNoPackets,
+ kWaitingForMediaAfterCameraRestart,
+ kWaitingForPaddingAfterCameraStopsAgain
+ };
+
+ TestState test_state_ = kBeforeStopCapture;
+ Clock* const clock_;
+ Mutex mutex_;
+ absl::optional<int64_t> last_packet_time_ms_ RTC_GUARDED_BY(mutex_);
+ test::FrameGeneratorCapturer* capturer_ RTC_GUARDED_BY(mutex_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, PaddingIsPrimarilyRetransmissions) {
+ const int kCapacityKbps = 10000; // 10 Mbps
+ class PaddingIsPrimarilyRetransmissions : public test::EndToEndTest {
+ public:
+ PaddingIsPrimarilyRetransmissions()
+ : EndToEndTest(kDefaultTimeout),
+ clock_(Clock::GetRealTimeClock()),
+ padding_length_(0),
+ total_length_(0),
+ call_(nullptr) {}
+
+ private:
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet, length);
+ padding_length_ += rtp_packet.padding_size();
+ total_length_ += length;
+ return SEND_PACKET;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ const int kNetworkDelayMs = 50;
+ BuiltInNetworkBehaviorConfig config;
+ config.loss_percent = 10;
+ config.link_capacity_kbps = kCapacityKbps;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return config;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Turn on RTX.
+ send_config->rtp.rtx.payload_type = kFakeVideoSendPayloadType;
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ }
+
+ void PerformTest() override {
+ // TODO(isheriff): Some platforms do not ramp up as expected to full
+ // capacity due to packet scheduling delays. Fix that before getting
+ // rid of this.
+ SleepMs(5000);
+ {
+ MutexLock lock(&mutex_);
+ // Expect padding to be a small percentage of total bytes sent.
+ EXPECT_LT(padding_length_, .1 * total_length_);
+ }
+ }
+
+ Mutex mutex_;
+ Clock* const clock_;
+ size_t padding_length_ RTC_GUARDED_BY(mutex_);
+ size_t total_length_ RTC_GUARDED_BY(mutex_);
+ Call* call_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+// This test first observes "high" bitrate use, at which point it sends a REMB
+// to indicate that the bitrate should be lowered significantly. The test then
+// observes the bitrate sink well below the min-transmit-bitrate threshold,
+// verifying that the min-transmit bitrate respects incoming REMB.
+//
+// Note that the test starts at "high" bitrate and does not ramp up to "higher"
+// bitrate, since no receiver report block or REMB is sent in the initial
+// phase.
+TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
+ static const int kMinTransmitBitrateBps = 400000;
+ static const int kHighBitrateBps = 150000;
+ static const int kRembBitrateBps = 80000;
+ static const int kRembRespectedBitrateBps = 100000;
+ class BitrateObserver : public test::SendTest {
+ public:
+ explicit BitrateObserver(TaskQueueBase* task_queue)
+ : SendTest(kDefaultTimeout),
+ task_queue_(task_queue),
+          retransmission_rate_limiter_(Clock::GetRealTimeClock(), 1000),
+ stream_(nullptr),
+ bitrate_capped_(false),
+ task_safety_flag_(PendingTaskSafetyFlag::CreateDetached()) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (IsRtcpPacket(rtc::MakeArrayView(packet, length)))
+ return DROP_PACKET;
+
+ RtpPacket rtp_packet;
+ RTC_CHECK(rtp_packet.Parse(packet, length));
+ const uint32_t ssrc = rtp_packet.Ssrc();
+ RTC_DCHECK(stream_);
+
+ task_queue_->PostTask(SafeTask(task_safety_flag_, [this, ssrc]() {
+ VideoSendStream::Stats stats = stream_->GetStats();
+ if (!stats.substreams.empty()) {
+ EXPECT_EQ(1u, stats.substreams.size());
+ int total_bitrate_bps =
+ stats.substreams.begin()->second.total_bitrate_bps;
+ test::GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "bitrate_stats_min_transmit_bitrate_low_remb", "bitrate_bps",
+ static_cast<size_t>(total_bitrate_bps) / 1000.0,
+ test::Unit::kKilobitsPerSecond,
+ test::ImprovementDirection::kNeitherIsBetter);
+ if (total_bitrate_bps > kHighBitrateBps) {
+ rtp_rtcp_->SetRemb(kRembBitrateBps, {ssrc});
+ bitrate_capped_ = true;
+ } else if (bitrate_capped_ &&
+ total_bitrate_bps < kRembRespectedBitrateBps) {
+ observation_complete_.Set();
+ }
+ }
+ }));
+
+ // Packets don't have to be delivered since the test is the receiver.
+ return DROP_PACKET;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ stream_ = send_stream;
+ RtpRtcpInterface::Configuration config;
+ config.clock = Clock::GetRealTimeClock();
+ config.outgoing_transport = feedback_transport_.get();
+      config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(config);
+ rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ feedback_transport_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ feedback_transport_->Enable();
+ encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ }
+
+ void OnStreamsStopped() override {
+ task_safety_flag_->SetNotAlive();
+ rtp_rtcp_.reset();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timeout while waiting for low bitrate stats after REMB.";
+ }
+
+ TaskQueueBase* const task_queue_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+ std::unique_ptr<internal::TransportAdapter> feedback_transport_;
+    RateLimiter retransmission_rate_limiter_;
+ VideoSendStream* stream_;
+ bool bitrate_capped_;
+ rtc::scoped_refptr<PendingTaskSafetyFlag> task_safety_flag_;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
+ static const int kStartBitrateBps = 300000;
+ static const int kNewMaxBitrateBps = 1234567;
+ static const uint8_t kExtensionId = kTransportSequenceNumberExtensionId;
+ class ChangingNetworkRouteTest : public test::EndToEndTest {
+ public:
+ explicit ChangingNetworkRouteTest(TaskQueueBase* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ task_queue_(task_queue),
+ call_(nullptr) {
+ module_process_thread_.Detach();
+ task_queue_thread_.Detach();
+ extensions_.Register<TransportSequenceNumber>(kExtensionId);
+ }
+
+ ~ChangingNetworkRouteTest() {
+      // Block until all already posted tasks run, to avoid a use-after-free
+      // when such a task accesses `this`.
+ SendTask(task_queue_, [] {});
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ RTC_DCHECK(!call_);
+ call_ = sender_call;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ }
+
+ void ModifyAudioConfigs(AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStreamInterface::Config>*
+ receive_configs) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ (*receive_configs)[0].rtp.extensions.clear();
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTC_DCHECK_RUN_ON(&module_process_thread_);
+ task_queue_->PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ if (!call_)
+ return;
+ Call::Stats stats = call_->GetStats();
+ if (stats.send_bandwidth_bps > kStartBitrateBps)
+ observation_complete_.Set();
+ });
+ return SEND_PACKET;
+ }
+
+ void OnStreamsStopped() override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ call_ = nullptr;
+ }
+
+ void PerformTest() override {
+ rtc::NetworkRoute new_route;
+ new_route.connected = true;
+ new_route.local = rtc::RouteEndpoint::CreateWithNetworkId(10);
+ new_route.remote = rtc::RouteEndpoint::CreateWithNetworkId(20);
+ BitrateConstraints bitrate_config;
+
+ SendTask(task_queue_,
+ [this, &new_route, &bitrate_config]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged(
+ "transport", new_route);
+ bitrate_config.start_bitrate_bps = kStartBitrateBps;
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ });
+
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for start bitrate to be exceeded.";
+
+ SendTask(
+ task_queue_, [this, &new_route, &bitrate_config]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ bitrate_config.start_bitrate_bps = -1;
+ bitrate_config.max_bitrate_bps = kNewMaxBitrateBps;
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ // TODO(holmer): We should set the last sent packet id here and
+ // verify that we correctly ignore any packet loss reported prior to
+ // that id.
+ new_route.local = rtc::RouteEndpoint::CreateWithNetworkId(
+ new_route.local.network_id() + 1);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged(
+ "transport", new_route);
+ EXPECT_GE(call_->GetStats().send_bandwidth_bps, kStartBitrateBps);
+ });
+ }
+
+ private:
+ webrtc::SequenceChecker module_process_thread_;
+ webrtc::SequenceChecker task_queue_thread_;
+ TaskQueueBase* const task_queue_;
+ RtpHeaderExtensionMap extensions_;
+ Call* call_ RTC_GUARDED_BY(task_queue_thread_);
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+// Test that if specified, relay cap is lifted on transition to direct
+// connection.
+// TODO(https://bugs.webrtc.org/13353): Test disabled due to flakiness.
+TEST_F(VideoSendStreamTest, DISABLED_RelayToDirectRoute) {
+ static const int kStartBitrateBps = 300000;
+ static const int kRelayBandwidthCapBps = 800000;
+ static const int kMinPacketsToSend = 100;
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Bwe-NetworkRouteConstraints/relay_cap:" +
+ std::to_string(kRelayBandwidthCapBps) + "bps/");
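+  // With kRelayBandwidthCapBps = 800000, the resulting field trial string is
+  // "WebRTC-Bwe-NetworkRouteConstraints/relay_cap:800000bps/".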
+
+ class RelayToDirectRouteTest : public test::EndToEndTest {
+ public:
+ explicit RelayToDirectRouteTest(TaskQueueBase* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ task_queue_(task_queue),
+ call_(nullptr),
+ packets_sent_(0),
+ relayed_phase_(true) {
+ module_process_thread_.Detach();
+ task_queue_thread_.Detach();
+ }
+
+ ~RelayToDirectRouteTest() {
+      // Block until all already posted tasks run, to avoid a use-after-free
+      // when such a task accesses `this`.
+ SendTask(task_queue_, [] {});
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ RTC_DCHECK(!call_);
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTC_DCHECK_RUN_ON(&module_process_thread_);
+ task_queue_->PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ if (!call_)
+ return;
+ bool had_time_to_exceed_cap_in_relayed_phase =
+ relayed_phase_ && ++packets_sent_ > kMinPacketsToSend;
+ bool did_exceed_cap =
+ call_->GetStats().send_bandwidth_bps > kRelayBandwidthCapBps;
+ if (did_exceed_cap || had_time_to_exceed_cap_in_relayed_phase)
+ observation_complete_.Set();
+ });
+ return SEND_PACKET;
+ }
+
+ void OnStreamsStopped() override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ call_ = nullptr;
+ }
+
+ void PerformTest() override {
+ rtc::NetworkRoute route;
+ route.connected = true;
+ route.local = rtc::RouteEndpoint::CreateWithNetworkId(10);
+ route.remote = rtc::RouteEndpoint::CreateWithNetworkId(20);
+
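+ // Relayed phase: mark the remote endpoint as TURN and configure the start
+ // bitrate; bandwidth should stay at or below the relay cap.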
+ SendTask(task_queue_, [this, &route]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ relayed_phase_ = true;
+ route.remote = route.remote.CreateWithTurn(true);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged("transport",
+ route);
+ BitrateConstraints bitrate_config;
+ bitrate_config.start_bitrate_bps = kStartBitrateBps;
+
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ });
+
+ EXPECT_TRUE(Wait())
+ << "Timeout waiting for sufficient packets sent count.";
+
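+ // Direct phase: verify the cap held, then clear TURN from the route and
+ // wait for the estimate to outgrow the relay cap.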
+ SendTask(task_queue_, [this, &route]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ EXPECT_LE(call_->GetStats().send_bandwidth_bps, kRelayBandwidthCapBps);
+
+ route.remote = route.remote.CreateWithTurn(false);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged("transport",
+ route);
+ relayed_phase_ = false;
+ observation_complete_.Reset();
+ });
+
+ EXPECT_TRUE(Wait())
+ << "Timeout while waiting for bandwidth to outgrow relay cap.";
+ }
+
+ private:
+ webrtc::SequenceChecker module_process_thread_;
+ webrtc::SequenceChecker task_queue_thread_;
+ TaskQueueBase* const task_queue_;
+ Call* call_ RTC_GUARDED_BY(task_queue_thread_);
+ int packets_sent_ RTC_GUARDED_BY(task_queue_thread_);
+ bool relayed_phase_ RTC_GUARDED_BY(task_queue_thread_);
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ChangingTransportOverhead) {
+ class ChangingTransportOverheadTest : public test::EndToEndTest {
+ public:
+ explicit ChangingTransportOverheadTest(TaskQueueBase* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ task_queue_(task_queue),
+ call_(nullptr),
+ packets_sent_(0),
+ transport_overhead_(0) {}
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ EXPECT_LE(length, kMaxRtpPacketSize);
+ MutexLock lock(&lock_);
+ if (++packets_sent_ < 100)
+ return SEND_PACKET;
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.max_packet_size = kMaxRtpPacketSize;
+ }
+
+ void PerformTest() override {
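+ // Apply a moderate transport overhead; OnSendRtp verifies that packets
+ // still respect kMaxRtpPacketSize.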
+ SendTask(task_queue_, [this]() {
+ transport_overhead_ = 100;
+ call_->GetTransportControllerSend()->OnTransportOverheadChanged(
+ transport_overhead_);
+ });
+
+ EXPECT_TRUE(Wait());
+
+ {
+ MutexLock lock(&lock_);
+ packets_sent_ = 0;
+ }
+
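+ // Repeat with a considerably larger overhead.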
+ SendTask(task_queue_, [this]() {
+ transport_overhead_ = 500;
+ call_->GetTransportControllerSend()->OnTransportOverheadChanged(
+ transport_overhead_);
+ });
+
+ EXPECT_TRUE(Wait());
+ }
+
+ private:
+ TaskQueueBase* const task_queue_;
+ Call* call_;
+ Mutex lock_;
+ int packets_sent_ RTC_GUARDED_BY(lock_);
+ int transport_overhead_;
+ const size_t kMaxRtpPacketSize = 1000;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+// The test class takes as arguments a switch selecting whether a content type
+// switch should occur, and a function pointer used to reset the send stream.
+// This is necessary since you cannot change the content type of a
+// VideoSendStream; you need to recreate it. Stopping and recreating the
+// stream can only be done on the main thread and in the context of
+// VideoSendStreamTest (not BaseTest).
+template <typename T>
+class MaxPaddingSetTest : public test::SendTest {
+ public:
+ static const uint32_t kMinTransmitBitrateBps = 400000;
+ static const uint32_t kActualEncodeBitrateBps = 40000;
+ static const uint32_t kMinPacketsToSend = 50;
+
+ MaxPaddingSetTest(bool test_switch_content_type,
+ T* stream_reset_fun,
+ TaskQueueBase* task_queue)
+ : SendTest(test::CallTest::kDefaultTimeout),
+ running_without_padding_(test_switch_content_type),
+ stream_resetter_(stream_reset_fun),
+ task_queue_(task_queue) {
+ RTC_DCHECK(stream_resetter_);
+ module_process_thread_.Detach();
+ task_queue_thread_.Detach();
+ }
+
+ ~MaxPaddingSetTest() {
+ // Block until all already posted tasks run, to avoid a 'use after free'
+ // when such a task accesses `this`.
+ SendTask(task_queue_, [] {});
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ RTC_DCHECK_EQ(1, encoder_config->number_of_streams);
+ if (running_without_padding_) {
+ encoder_config->min_transmit_bitrate_bps = 0;
+ encoder_config->content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ } else {
+ encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+ send_stream_config_ = send_config->Copy();
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ RTC_DCHECK(task_queue_->IsCurrent());
+ RTC_DCHECK(!call_);
+ RTC_DCHECK(sender_call);
+ call_ = sender_call;
+ }
+
+ // Called on the pacer thread.
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTC_DCHECK_RUN_ON(&module_process_thread_);
+
+ // Check the stats on the correct thread and signal the 'complete' flag
+ // once we detect that we're done.
+
+ task_queue_->PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ // In case we get a callback during teardown: OnStreamsStopped() has
+ // already been called, `call_` is null and the streams are being torn
+ // down.
+ if (!call_)
+ return;
+
+ ++packets_sent_;
+
+ Call::Stats stats = call_->GetStats();
+ if (running_without_padding_) {
+ EXPECT_EQ(0, stats.max_padding_bitrate_bps);
+
+ // Wait until at least kMinPacketsToSend packets have been sent, so that
+ // we have reliable data.
+ if (packets_sent_ < kMinPacketsToSend)
+ return;
+
+ // We've sent kMinPacketsToSend packets with the default configuration;
+ // switch to screen content and set a min transmit bitrate. Note that we
+ // need to recreate the stream when changing the content type.
+ packets_sent_ = 0;
+
+ encoder_config_.min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ encoder_config_.content_type = VideoEncoderConfig::ContentType::kScreen;
+
+ running_without_padding_ = false;
+ (*stream_resetter_)(send_stream_config_, encoder_config_);
+ } else {
+ // Make sure the pacer has been configured with a min transmit bitrate.
+ if (stats.max_padding_bitrate_bps > 0) {
+ observation_complete_.Set();
+ }
+ }
+ });
+
+ return SEND_PACKET;
+ }
+
+ // Called on `task_queue_`.
+ void OnStreamsStopped() override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ RTC_DCHECK(task_queue_->IsCurrent());
+ call_ = nullptr;
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(Wait()) << "Timed out waiting for a valid padding bitrate.";
+ }
+
+ private:
+ webrtc::SequenceChecker task_queue_thread_;
+ Call* call_ RTC_GUARDED_BY(task_queue_thread_) = nullptr;
+ VideoSendStream::Config send_stream_config_{nullptr};
+ VideoEncoderConfig encoder_config_;
+ webrtc::SequenceChecker module_process_thread_;
+ uint32_t packets_sent_ RTC_GUARDED_BY(task_queue_thread_) = 0;
+ bool running_without_padding_ RTC_GUARDED_BY(task_queue_thread_);
+ T* const stream_resetter_;
+ TaskQueueBase* const task_queue_;
+};
+
+TEST_F(VideoSendStreamTest, RespectsMinTransmitBitrate) {
+ auto reset_fun = [](const VideoSendStream::Config& send_stream_config,
+ const VideoEncoderConfig& encoder_config) {};
+ MaxPaddingSetTest<decltype(reset_fun)> test(false, &reset_fun, task_queue());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, RespectsMinTransmitBitrateAfterContentSwitch) {
+ // Function for removing and recreating the send stream with a new config.
+ auto reset_fun = [this](const VideoSendStream::Config& send_stream_config,
+ const VideoEncoderConfig& encoder_config) {
+ RTC_DCHECK(task_queue()->IsCurrent());
+ Stop();
+ DestroyVideoSendStreams();
+ SetVideoSendConfig(send_stream_config);
+ SetVideoEncoderConfig(encoder_config);
+ CreateVideoSendStreams();
+ SetVideoDegradation(DegradationPreference::MAINTAIN_RESOLUTION);
+ Start();
+ };
+ MaxPaddingSetTest<decltype(reset_fun)> test(true, &reset_fun, task_queue());
+ RunBaseTest(&test);
+}
+
+// This test verifies that new frame sizes reconfigure the encoder even though
+// we are not (yet) sending. The purpose of this is to permit encoding as
+// quickly as possible once we start sending. Likely the frames being input
+// are from the same source that will be sent later, which just means that
+// we're ready earlier.
+TEST_F(VideoSendStreamTest,
+ EncoderReconfigureOnResolutionChangeWhenNotSending) {
+ class EncoderObserver : public test::FakeEncoder {
+ public:
+ EncoderObserver()
+ : FakeEncoder(Clock::GetRealTimeClock()),
+ last_initialized_frame_width_(0),
+ last_initialized_frame_height_(0) {}
+
+ void WaitForResolution(int width, int height) {
+ {
+ MutexLock lock(&mutex_);
+ if (last_initialized_frame_width_ == width &&
+ last_initialized_frame_height_ == height) {
+ return;
+ }
+ }
+ EXPECT_TRUE(
+ init_encode_called_.Wait(VideoSendStreamTest::kDefaultTimeout));
+ {
+ MutexLock lock(&mutex_);
+ EXPECT_EQ(width, last_initialized_frame_width_);
+ EXPECT_EQ(height, last_initialized_frame_height_);
+ }
+ }
+
+ private:
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ MutexLock lock(&mutex_);
+ last_initialized_frame_width_ = config->width;
+ last_initialized_frame_height_ = config->height;
+ init_encode_called_.Set();
+ return FakeEncoder::InitEncode(config, settings);
+ }
+
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ ADD_FAILURE()
+ << "Unexpected Encode call since the send stream is not started";
+ return 0;
+ }
+
+ Mutex mutex_;
+ rtc::Event init_encode_called_;
+ int last_initialized_frame_width_ RTC_GUARDED_BY(&mutex_);
+ int last_initialized_frame_height_ RTC_GUARDED_BY(&mutex_);
+ };
+
+ test::NullTransport transport;
+ EncoderObserver encoder;
+ test::VideoEncoderProxyFactory encoder_factory(&encoder);
+
+ SendTask(task_queue(), [this, &transport, &encoder_factory]() {
+ CreateSenderCall();
+ CreateSendConfig(1, 0, 0, &transport);
+ GetVideoSendConfig()->encoder_settings.encoder_factory = &encoder_factory;
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ frame_generator_capturer_->Start();
+ });
+
+ encoder.WaitForResolution(kDefaultWidth, kDefaultHeight);
+
+ SendTask(task_queue(), [this]() {
+ frame_generator_capturer_->ChangeResolution(kDefaultWidth * 2,
+ kDefaultHeight * 2);
+ });
+
+ encoder.WaitForResolution(kDefaultWidth * 2, kDefaultHeight * 2);
+
+ SendTask(task_queue(), [this]() {
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, CanReconfigureToUseStartBitrateAbovePreviousMax) {
+ class StartBitrateObserver : public test::FakeEncoder {
+ public:
+ StartBitrateObserver()
+ : FakeEncoder(Clock::GetRealTimeClock()), start_bitrate_kbps_(0) {}
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ MutexLock lock(&mutex_);
+ start_bitrate_kbps_ = config->startBitrate;
+ start_bitrate_changed_.Set();
+ return FakeEncoder::InitEncode(config, settings);
+ }
+
+ void SetRates(const RateControlParameters& parameters) override {
+ MutexLock lock(&mutex_);
+ start_bitrate_kbps_ = parameters.bitrate.get_sum_kbps();
+ start_bitrate_changed_.Set();
+ FakeEncoder::SetRates(parameters);
+ }
+
+ int GetStartBitrateKbps() const {
+ MutexLock lock(&mutex_);
+ return start_bitrate_kbps_;
+ }
+
+ bool WaitForStartBitrate() {
+ return start_bitrate_changed_.Wait(VideoSendStreamTest::kDefaultTimeout);
+ }
+
+ private:
+ mutable Mutex mutex_;
+ rtc::Event start_bitrate_changed_;
+ int start_bitrate_kbps_ RTC_GUARDED_BY(mutex_);
+ };
+
+ CreateSenderCall();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+
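+ // Request a start bitrate above the encoder's configured max; it is
+ // expected to be capped to the encoder's max on initialization.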
+ BitrateConstraints bitrate_config;
+ bitrate_config.start_bitrate_bps =
+ 2 * GetVideoEncoderConfig()->max_bitrate_bps;
+ sender_call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+
+ StartBitrateObserver encoder;
+ test::VideoEncoderProxyFactory encoder_factory(&encoder);
+ GetVideoSendConfig()->encoder_settings.encoder_factory = &encoder_factory;
+
+ CreateVideoStreams();
+
+ // Start capturing and encoding frames to force encoder reconfiguration.
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ frame_generator_capturer_->Start();
+ // TODO(crbug/1255737): Added manual current thread message processing because
+ // the test code context is interpreted as the worker thread and we assume
+ // progress on it. The test should probably be ported to use simulated time
+ // instead (ported to a scenario test perhaps?).
+ rtc::Thread::Current()->ProcessMessages(5000);
+
+ EXPECT_TRUE(encoder.WaitForStartBitrate());
+ EXPECT_EQ(GetVideoEncoderConfig()->max_bitrate_bps / 1000,
+ encoder.GetStartBitrateKbps());
+
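+ // Raise the encoder max above the requested start bitrate and reconfigure;
+ // the full start bitrate should now take effect.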
+ GetVideoEncoderConfig()->max_bitrate_bps =
+ 2 * bitrate_config.start_bitrate_bps;
+ GetVideoSendStream()->ReconfigureVideoEncoder(
+ GetVideoEncoderConfig()->Copy());
+ // TODO(crbug/1255737): Added manual current thread message processing because
+ // the test code context is interpreted as the worker thread and we assume
+ // progress on it. The test should probably be ported to use simulated time
+ // instead (ported to a scenario test perhaps?).
+ rtc::Thread::Current()->ProcessMessages(5000);
+
+ // The new bitrate should be reconfigured above the previous max. As there's
+ // no network connection, this shouldn't be flaky, since no bitrate should
+ // have been reported in between.
+ EXPECT_TRUE(encoder.WaitForStartBitrate());
+ EXPECT_EQ(bitrate_config.start_bitrate_bps / 1000,
+ encoder.GetStartBitrateKbps());
+
+ DestroyStreams();
+}
+
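+// Fake encoder that records the most recently configured total bitrate and
+// lets tests wait until it becomes zero or non-zero.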
+class StartStopBitrateObserver : public test::FakeEncoder {
+ public:
+ StartStopBitrateObserver() : FakeEncoder(Clock::GetRealTimeClock()) {}
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ MutexLock lock(&mutex_);
+ encoder_init_.Set();
+ return FakeEncoder::InitEncode(config, settings);
+ }
+
+ void SetRates(const RateControlParameters& parameters) override {
+ MutexLock lock(&mutex_);
+ bitrate_kbps_ = parameters.bitrate.get_sum_kbps();
+ bitrate_changed_.Set();
+ FakeEncoder::SetRates(parameters);
+ }
+
+ bool WaitForEncoderInit() {
+ return encoder_init_.Wait(VideoSendStreamTest::kDefaultTimeout);
+ }
+
+ bool WaitBitrateChanged(WaitUntil until) {
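+ // Poll the last reported bitrate, re-waiting on `bitrate_changed_` until
+ // the requested condition holds or the wait times out.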
+ do {
+ absl::optional<int> bitrate_kbps;
+ {
+ MutexLock lock(&mutex_);
+ bitrate_kbps = bitrate_kbps_;
+ }
+ if (!bitrate_kbps)
+ continue;
+
+ if ((until == WaitUntil::kNonZero && *bitrate_kbps > 0) ||
+ (until == WaitUntil::kZero && *bitrate_kbps == 0)) {
+ return true;
+ }
+ } while (bitrate_changed_.Wait(VideoSendStreamTest::kDefaultTimeout));
+ return false;
+ }
+
+ private:
+ Mutex mutex_;
+ rtc::Event encoder_init_;
+ rtc::Event bitrate_changed_;
+ absl::optional<int> bitrate_kbps_ RTC_GUARDED_BY(mutex_);
+};
+
+TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
+ class EncoderStateObserver : public test::SendTest, public VideoEncoder {
+ public:
+ explicit EncoderStateObserver(TaskQueueBase* task_queue)
+ : SendTest(kDefaultTimeout),
+ task_queue_(task_queue),
+ stream_(nullptr),
+ initialized_(false),
+ callback_registered_(false),
+ num_releases_(0),
+ released_(false),
+ encoder_factory_(this) {}
+
+ bool IsReleased() RTC_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ return released_;
+ }
+
+ bool IsReadyForEncode() RTC_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ return IsReadyForEncodeLocked();
+ }
+
+ size_t num_releases() RTC_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ return num_releases_;
+ }
+
+ private:
+ bool IsReadyForEncodeLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+ return initialized_ && callback_registered_;
+ }
+
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override {
+ // Ignored.
+ }
+
+ int32_t InitEncode(const VideoCodec* codecSettings,
+ const Settings& settings) override
+ RTC_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ EXPECT_FALSE(initialized_);
+ initialized_ = true;
+ released_ = false;
+ return 0;
+ }
+
+ int32_t Encode(const VideoFrame& inputImage,
+ const std::vector<VideoFrameType>* frame_types) override {
+ EXPECT_TRUE(IsReadyForEncode());
+
+ observation_complete_.Set();
+ return 0;
+ }
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override RTC_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ EXPECT_TRUE(initialized_);
+ callback_registered_ = true;
+ return 0;
+ }
+
+ int32_t Release() override RTC_LOCKS_EXCLUDED(mutex_) {
+ MutexLock lock(&mutex_);
+ EXPECT_TRUE(IsReadyForEncodeLocked());
+ EXPECT_FALSE(released_);
+ initialized_ = false;
+ callback_registered_ = false;
+ released_ = true;
+ ++num_releases_;
+ return 0;
+ }
+
+ void SetRates(const RateControlParameters& parameters) override {
+ EXPECT_TRUE(IsReadyForEncode());
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for Encode.";
+
+ SendTask(task_queue_, [this]() {
+ EXPECT_EQ(0u, num_releases());
+ stream_->ReconfigureVideoEncoder(std::move(encoder_config_));
+ EXPECT_EQ(0u, num_releases());
+ stream_->Stop();
+ // Encoder should not be released before destroying the VideoSendStream.
+ EXPECT_FALSE(IsReleased());
+ EXPECT_TRUE(IsReadyForEncode());
+ stream_->Start();
+ });
+
+ // Sanity check: make sure we still encode frames with this encoder.
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for Encode.";
+ }
+
+ TaskQueueBase* const task_queue_;
+ Mutex mutex_;
+ VideoSendStream* stream_;
+ bool initialized_ RTC_GUARDED_BY(mutex_);
+ bool callback_registered_ RTC_GUARDED_BY(mutex_);
+ size_t num_releases_ RTC_GUARDED_BY(mutex_);
+ bool released_ RTC_GUARDED_BY(mutex_);
+ test::VideoEncoderProxyFactory encoder_factory_;
+ VideoEncoderConfig encoder_config_;
+ } test_encoder(task_queue());
+
+ RunBaseTest(&test_encoder);
+
+ EXPECT_TRUE(test_encoder.IsReleased());
+ EXPECT_EQ(1u, test_encoder.num_releases());
+}
+
+static const size_t kVideoCodecConfigObserverNumberOfTemporalLayers = 3;
+template <typename T>
+class VideoCodecConfigObserver : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ VideoCodecConfigObserver(VideoCodecType video_codec_type,
+ TaskQueueBase* task_queue)
+ : SendTest(VideoSendStreamTest::kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ video_codec_type_(video_codec_type),
+ stream_(nullptr),
+ encoder_factory_(this),
+ task_queue_(task_queue) {
+ InitCodecSpecifics();
+ }
+
+ private:
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = CodecTypeToPayloadString(video_codec_type_);
+
+ encoder_config->codec_type = video_codec_type_;
+ encoder_config->encoder_specific_settings = GetEncoderSpecificSettings();
+ EXPECT_EQ(1u, encoder_config->simulcast_layers.size());
+ encoder_config->simulcast_layers[0].num_temporal_layers =
+ kVideoCodecConfigObserverNumberOfTemporalLayers;
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ stream_ = send_stream;
+ }
+
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ EXPECT_EQ(video_codec_type_, config->codecType);
+ VerifyCodecSpecifics(*config);
+ int ret = FakeEncoder::InitEncode(config, settings);
+ init_encode_event_.Set();
+ return ret;
+ }
+
+ void InitCodecSpecifics();
+ void VerifyCodecSpecifics(const VideoCodec& config) const;
+ rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+ GetEncoderSpecificSettings() const;
+
+ void PerformTest() override {
+ EXPECT_TRUE(init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeout));
+ ASSERT_EQ(1, FakeEncoder::GetNumInitializations())
+ << "VideoEncoder not initialized.";
+
+ // Change encoder settings to actually trigger reconfiguration.
+ encoder_config_.frame_drop_enabled = !encoder_config_.frame_drop_enabled;
+ encoder_config_.encoder_specific_settings = GetEncoderSpecificSettings();
+ SendTask(task_queue_, [&]() {
+ stream_->ReconfigureVideoEncoder(std::move(encoder_config_));
+ });
+ ASSERT_TRUE(init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeout));
+ EXPECT_EQ(2, FakeEncoder::GetNumInitializations())
+ << "ReconfigureVideoEncoder did not reinitialize the encoder with "
+ "new encoder settings.";
+ }
+
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ // Silently skip encoding; FakeEncoder::Encode doesn't produce VP8.
+ return 0;
+ }
+
+ T encoder_settings_;
+ const VideoCodecType video_codec_type_;
+ rtc::Event init_encode_event_;
+ VideoSendStream* stream_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ VideoEncoderConfig encoder_config_;
+ TaskQueueBase* task_queue_;
+};
+
+template <>
+void VideoCodecConfigObserver<VideoCodecH264>::InitCodecSpecifics() {}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecH264>::VerifyCodecSpecifics(
+ const VideoCodec& config) const {
+ // Check that the number of temporal layers has propagated properly to
+ // VideoCodec.
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.H264().numberOfTemporalLayers);
+
+ for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.simulcastStream[i].numberOfTemporalLayers);
+ }
+
+ // Build the expected settings: the number of temporal layers should have
+ // been overridden when reconfiguring the encoder, so it will not match the
+ // config as originally set.
+ VideoCodecH264 encoder_settings = VideoEncoder::GetDefaultH264Settings();
+ encoder_settings.numberOfTemporalLayers =
+ kVideoCodecConfigObserverNumberOfTemporalLayers;
+ EXPECT_EQ(config.H264(), encoder_settings);
+}
+
+template <>
+rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+VideoCodecConfigObserver<VideoCodecH264>::GetEncoderSpecificSettings() const {
+ return nullptr;
+}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecVP8>::InitCodecSpecifics() {
+ encoder_settings_ = VideoEncoder::GetDefaultVp8Settings();
+}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecVP8>::VerifyCodecSpecifics(
+ const VideoCodec& config) const {
+ // Check that the number of temporal layers has propagated properly to
+ // VideoCodec.
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.VP8().numberOfTemporalLayers);
+
+ for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.simulcastStream[i].numberOfTemporalLayers);
+ }
+
+ // Build the expected settings: the number of temporal layers should have
+ // been overridden when reconfiguring the encoder, so it will not match the
+ // config as originally set.
+ VideoCodecVP8 encoder_settings = encoder_settings_;
+ encoder_settings.numberOfTemporalLayers =
+ kVideoCodecConfigObserverNumberOfTemporalLayers;
+ EXPECT_EQ(
+ 0, memcmp(&config.VP8(), &encoder_settings, sizeof(encoder_settings_)));
+}
+
+template <>
+rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+VideoCodecConfigObserver<VideoCodecVP8>::GetEncoderSpecificSettings() const {
+ return rtc::make_ref_counted<VideoEncoderConfig::Vp8EncoderSpecificSettings>(
+ encoder_settings_);
+}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecVP9>::InitCodecSpecifics() {
+ encoder_settings_ = VideoEncoder::GetDefaultVp9Settings();
+}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecVP9>::VerifyCodecSpecifics(
+ const VideoCodec& config) const {
+ // Check that the number of temporal layers has propagated properly to
+ // VideoCodec.
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.VP9().numberOfTemporalLayers);
+
+ for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.simulcastStream[i].numberOfTemporalLayers);
+ }
+
+ // Build the expected settings: the number of temporal layers should have
+ // been overridden when reconfiguring the encoder, so it will not match the
+ // config as originally set.
+ VideoCodecVP9 encoder_settings = encoder_settings_;
+ encoder_settings.numberOfTemporalLayers =
+ kVideoCodecConfigObserverNumberOfTemporalLayers;
+ EXPECT_EQ(
+ 0, memcmp(&(config.VP9()), &encoder_settings, sizeof(encoder_settings_)));
+}
+
+template <>
+rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+VideoCodecConfigObserver<VideoCodecVP9>::GetEncoderSpecificSettings() const {
+ return rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ encoder_settings_);
+}
+
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
+ VideoCodecConfigObserver<VideoCodecVP8> test(kVideoCodecVP8, task_queue());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp9Config) {
+ VideoCodecConfigObserver<VideoCodecVP9> test(kVideoCodecVP9, task_queue());
+ RunBaseTest(&test);
+}
+
+// Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376.
+#if defined(MEMORY_SANITIZER)
+#define MAYBE_EncoderSetupPropagatesH264Config \
+ DISABLED_EncoderSetupPropagatesH264Config
+#else
+#define MAYBE_EncoderSetupPropagatesH264Config EncoderSetupPropagatesH264Config
+#endif
+TEST_F(VideoSendStreamTest, MAYBE_EncoderSetupPropagatesH264Config) {
+ VideoCodecConfigObserver<VideoCodecH264> test(kVideoCodecH264, task_queue());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
+ class RtcpSenderReportTest : public test::SendTest {
+ public:
+ RtcpSenderReportTest()
+ : SendTest(kDefaultTimeout),
+ rtp_packets_sent_(0),
+ media_bytes_sent_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ ++rtp_packets_sent_;
+ media_bytes_sent_ += rtp_packet.payload_size();
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ if (parser.sender_report()->num_packets() > 0) {
+ // Only compare sent media bytes if SenderPacketCount matches the
+ // number of sent RTP packets (a new RTP packet could be sent before
+ // the RTCP packet).
+ if (parser.sender_report()->sender_octet_count() > 0 &&
+ parser.sender_report()->sender_packet_count() ==
+ rtp_packets_sent_) {
+ EXPECT_EQ(media_bytes_sent_,
+ parser.sender_report()->sender_octet_count());
+ observation_complete_.Set();
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP sender report.";
+ }
+
+ Mutex mutex_;
+ size_t rtp_packets_sent_ RTC_GUARDED_BY(&mutex_);
+ size_t media_bytes_sent_ RTC_GUARDED_BY(&mutex_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
+ static const int kScreencastMaxTargetBitrateDeltaKbps = 1;
+
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(frame_width, frame_height, encoder_config);
+ RTC_CHECK_GT(streams[0].max_bitrate_bps,
+ kScreencastMaxTargetBitrateDeltaKbps);
+ streams[0].target_bitrate_bps =
+ streams[0].max_bitrate_bps -
+ kScreencastMaxTargetBitrateDeltaKbps * 1000;
+ return streams;
+ }
+ };
+
+ class ScreencastTargetBitrateTest : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ ScreencastTargetBitrateTest()
+ : SendTest(kDefaultTimeout),
+ test::FakeEncoder(Clock::GetRealTimeClock()),
+ encoder_factory_(this) {}
+
+ private:
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ EXPECT_EQ(config->numberOfSimulcastStreams, 1);
+ EXPECT_EQ(static_cast<unsigned int>(kScreencastMaxTargetBitrateDeltaKbps),
+ config->simulcastStream[0].maxBitrate -
+ config->simulcastStream[0].targetBitrate);
+ observation_complete_.Set();
+ return test::FakeEncoder::InitEncode(config, settings);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ EXPECT_EQ(1u, encoder_config->number_of_streams);
+ encoder_config->video_stream_factory =
+ rtc::make_ref_counted<VideoStreamFactory>();
+ EXPECT_EQ(1u, encoder_config->simulcast_layers.size());
+ encoder_config->simulcast_layers[0].num_temporal_layers = 2;
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for the encoder to be initialized.";
+ }
+ test::VideoEncoderProxyFactory encoder_factory_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) {
+ // These are chosen to be "kind of odd" so that they aren't accidentally
+ // matched against default values.
+ static const int kMinBitrateKbps = 137;
+ static const int kStartBitrateKbps = 345;
+ static const int kLowerMaxBitrateKbps = 312;
+ static const int kMaxBitrateKbps = 413;
+ static const int kIncreasedStartBitrateKbps = 451;
+ static const int kIncreasedMaxBitrateKbps = 597;
+ // TODO(bugs.webrtc.org/12058): If these field trials are on, we get lower
+ // bitrates than expected by this test, due to encoder pushback and
+ // subtracted overhead.
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-VideoRateControl/bitrate_adjuster:false/");
+
+ class EncoderBitrateThresholdObserver : public test::SendTest,
+ public VideoBitrateAllocatorFactory,
+ public test::FakeEncoder {
+ public:
+ explicit EncoderBitrateThresholdObserver(TaskQueueBase* task_queue)
+ : SendTest(kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ target_bitrate_(0),
+ num_rate_allocator_creations_(0),
+ num_encoder_initializations_(0),
+ call_(nullptr),
+ send_stream_(nullptr),
+ encoder_factory_(this),
+ bitrate_allocator_factory_(
+ CreateBuiltinVideoBitrateAllocatorFactory()) {}
+
+ private:
+ std::unique_ptr<VideoBitrateAllocator> CreateVideoBitrateAllocator(
+ const VideoCodec& codec) override {
+ EXPECT_GE(codec.startBitrate, codec.minBitrate);
+ EXPECT_LE(codec.startBitrate, codec.maxBitrate);
+ if (num_rate_allocator_creations_ == 0) {
+ EXPECT_EQ(static_cast<unsigned int>(kMinBitrateKbps), codec.minBitrate);
+ EXPECT_NEAR(static_cast<unsigned int>(kStartBitrateKbps),
+ codec.startBitrate, 10);
+ EXPECT_EQ(static_cast<unsigned int>(kMaxBitrateKbps), codec.maxBitrate);
+ } else if (num_rate_allocator_creations_ == 1) {
+ EXPECT_EQ(static_cast<unsigned int>(kLowerMaxBitrateKbps),
+ codec.maxBitrate);
+ // The start bitrate should be kept (-1) and capped to the max bitrate.
+ // Since this is not an end-to-end call, no receiver should have been
+ // returning a REMB that could lower this estimate.
+ EXPECT_EQ(codec.startBitrate, codec.maxBitrate);
+ } else if (num_rate_allocator_creations_ == 2) {
+ EXPECT_EQ(static_cast<unsigned int>(kIncreasedMaxBitrateKbps),
+ codec.maxBitrate);
+ // The start bitrate will be whatever rate the BitRateController has
+ // currently configured, bounded by the configured min and max bitrates.
+ }
+ ++num_rate_allocator_creations_;
+ create_rate_allocator_event_.Set();
+
+ return bitrate_allocator_factory_->CreateVideoBitrateAllocator(codec);
+ }
+
+ int32_t InitEncode(const VideoCodec* codecSettings,
+ const Settings& settings) override {
+ EXPECT_EQ(0, num_encoder_initializations_);
+ EXPECT_EQ(static_cast<unsigned int>(kMinBitrateKbps),
+ codecSettings->minBitrate);
+ EXPECT_NEAR(static_cast<unsigned int>(kStartBitrateKbps),
+ codecSettings->startBitrate, 10);
+ EXPECT_EQ(static_cast<unsigned int>(kMaxBitrateKbps),
+ codecSettings->maxBitrate);
+
+ ++num_encoder_initializations_;
+
+ observation_complete_.Set();
+ init_encode_event_.Set();
+
+ return FakeEncoder::InitEncode(codecSettings, settings);
+ }
+
+ void SetRates(const RateControlParameters& parameters) override {
+ {
+ MutexLock lock(&mutex_);
+ if (target_bitrate_ == parameters.bitrate.get_sum_kbps()) {
+ FakeEncoder::SetRates(parameters);
+ return;
+ }
+ target_bitrate_ = parameters.bitrate.get_sum_kbps();
+ }
+ bitrate_changed_event_.Set();
+ FakeEncoder::SetRates(parameters);
+ }
+
+ void WaitForSetRates(uint32_t expected_bitrate, int abs_error) {
+ // Wait for the expected rate to be set. In some cases there can be
+ // more than one update pending, in which case we keep waiting
+ // until the correct value has been observed.
+ // The target_bitrate_ is reduced by the calculated packet overhead.
+ const int64_t start_time = rtc::TimeMillis();
+ do {
+ MutexLock lock(&mutex_);
+
+ int error = target_bitrate_ - expected_bitrate;
+ if ((error < 0 && error >= -abs_error) ||
+ (error >= 0 && error <= abs_error)) {
+ return;
+ }
+ } while (bitrate_changed_event_.Wait(
+ std::max(TimeDelta::Millis(1),
+ VideoSendStreamTest::kDefaultTimeout -
+ TimeDelta::Millis(rtc::TimeMillis() - start_time))));
+ MutexLock lock(&mutex_);
+ EXPECT_NEAR(target_bitrate_, expected_bitrate, abs_error)
+ << "Timed out while waiting encoder rate to be set.";
+ }
+
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ bitrate_config->min_bitrate_bps = kMinBitrateKbps * 1000;
+ bitrate_config->start_bitrate_bps = kStartBitrateKbps * 1000;
+ bitrate_config->max_bitrate_bps = kMaxBitrateKbps * 1000;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->encoder_settings.bitrate_allocator_factory = this;
+ // Set bitrates lower/higher than min/max to make sure they are properly
+ // capped.
+ encoder_config->max_bitrate_bps = kMaxBitrateKbps * 1000;
+ EXPECT_EQ(1u, encoder_config->simulcast_layers.size());
+ encoder_config->simulcast_layers[0].min_bitrate_bps =
+ kMinBitrateKbps * 1000;
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(create_rate_allocator_event_.Wait(
+ VideoSendStreamTest::kDefaultTimeout))
+ << "Timed out while waiting for rate allocator to be created.";
+ ASSERT_TRUE(init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeout))
+ << "Timed out while waiting for encoder to be configured.";
+ WaitForSetRates(kStartBitrateKbps, 80);
+ BitrateConstraints bitrate_config;
+ bitrate_config.start_bitrate_bps = kIncreasedStartBitrateKbps * 1000;
+ bitrate_config.max_bitrate_bps = kIncreasedMaxBitrateKbps * 1000;
+ SendTask(task_queue_, [this, &bitrate_config]() {
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ });
+ // Encoder rate is capped by EncoderConfig max_bitrate_bps.
+ WaitForSetRates(kMaxBitrateKbps, 10);
+ encoder_config_.max_bitrate_bps = kLowerMaxBitrateKbps * 1000;
+ SendTask(task_queue_, [&]() {
+ send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy());
+ });
+ ASSERT_TRUE(create_rate_allocator_event_.Wait(
+ VideoSendStreamTest::kDefaultTimeout));
+ EXPECT_EQ(2, num_rate_allocator_creations_)
+ << "Rate allocator should have been recreated.";
+
+ WaitForSetRates(kLowerMaxBitrateKbps, 10);
+ EXPECT_EQ(1, num_encoder_initializations_);
+
+ encoder_config_.max_bitrate_bps = kIncreasedMaxBitrateKbps * 1000;
+ SendTask(task_queue_, [&]() {
+ send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy());
+ });
+ ASSERT_TRUE(create_rate_allocator_event_.Wait(
+ VideoSendStreamTest::kDefaultTimeout));
+ EXPECT_EQ(3, num_rate_allocator_creations_)
+ << "Rate allocator should have been recreated.";
+
+ // Expected target bitrate is the start bitrate set in the call to
+ // call_->GetTransportControllerSend()->SetSdpBitrateParameters.
+ WaitForSetRates(kIncreasedStartBitrateKbps, 10);
+ EXPECT_EQ(1, num_encoder_initializations_);
+ }
+
+ TaskQueueBase* const task_queue_;
+ rtc::Event create_rate_allocator_event_;
+ rtc::Event init_encode_event_;
+ rtc::Event bitrate_changed_event_;
+ Mutex mutex_;
+ uint32_t target_bitrate_ RTC_GUARDED_BY(&mutex_);
+
+ int num_rate_allocator_creations_;
+ int num_encoder_initializations_;
+ webrtc::Call* call_;
+ webrtc::VideoSendStream* send_stream_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory_;
+ webrtc::VideoEncoderConfig encoder_config_;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ReportsSentResolution) {
+ static const size_t kNumStreams = 3;
+ // Unusual resolutions to make sure that they are the ones being reported.
+ static const struct {
+ int width;
+ int height;
+ } kEncodedResolution[kNumStreams] = {{241, 181}, {300, 121}, {121, 221}};
+ class ScreencastTargetBitrateTest : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ explicit ScreencastTargetBitrateTest(TaskQueueBase* task_queue)
+ : SendTest(kDefaultTimeout),
+ test::FakeEncoder(Clock::GetRealTimeClock()),
+ send_stream_(nullptr),
+ encoder_factory_(this),
+ task_queue_(task_queue) {}
+
+ private:
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ CodecSpecificInfo specifics;
+ specifics.codecType = kVideoCodecGeneric;
+
+ EncodedImage encoded;
+ auto buffer = EncodedImageBuffer::Create(16);
+ memset(buffer->data(), 0, 16);
+ encoded.SetEncodedData(buffer);
+ encoded.SetTimestamp(input_image.timestamp());
+ encoded.capture_time_ms_ = input_image.render_time_ms();
+
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ encoded._frameType = (*frame_types)[i];
+ encoded._encodedWidth = kEncodedResolution[i].width;
+ encoded._encodedHeight = kEncodedResolution[i].height;
+ encoded.SetSpatialIndex(i);
+ EncodedImageCallback* callback;
+ {
+ MutexLock lock(&mutex_);
+ callback = callback_;
+ }
+ RTC_DCHECK(callback);
+ if (callback->OnEncodedImage(encoded, &specifics).error !=
+ EncodedImageCallback::Result::OK) {
+ return -1;
+ }
+ }
+
+ observation_complete_.Set();
+ return 0;
+ }
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ EXPECT_EQ(kNumStreams, encoder_config->number_of_streams);
+ }
+
+ size_t GetNumVideoStreams() const override { return kNumStreams; }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for the encoder to send one frame.";
+ VideoSendStream::Stats stats;
+ SendTask(task_queue_, [&]() { stats = send_stream_->GetStats(); });
+
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ ASSERT_TRUE(stats.substreams.find(kVideoSendSsrcs[i]) !=
+ stats.substreams.end())
+ << "No stats for SSRC: " << kVideoSendSsrcs[i]
+ << ", stats should exist as soon as frames have been encoded.";
+ VideoSendStream::StreamStats ssrc_stats =
+ stats.substreams[kVideoSendSsrcs[i]];
+ EXPECT_EQ(kEncodedResolution[i].width, ssrc_stats.width);
+ EXPECT_EQ(kEncodedResolution[i].height, ssrc_stats.height);
+ }
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ VideoSendStream* send_stream_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ TaskQueueBase* const task_queue_;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+#if defined(RTC_ENABLE_VP9)
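+// Base class for tests that inspect the VP9 payload descriptor of every sent
+// RTP packet; subclasses hook the video configs and per-packet header checks.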
+class Vp9HeaderObserver : public test::SendTest {
+ public:
+ explicit Vp9HeaderObserver(const Vp9TestParams& params)
+ : SendTest(VideoSendStreamTest::kLongTimeout),
+ encoder_factory_([]() { return VP9Encoder::Create(); }),
+ params_(params),
+ vp9_settings_(VideoEncoder::GetDefaultVp9Settings()) {}
+
+ virtual void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) {}
+
+ virtual void InspectHeader(const RTPVideoHeaderVP9& vp9) = 0;
+
+ private:
+ const int kVp9PayloadType = test::CallTest::kVideoSendPayloadType;
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP9";
+ send_config->rtp.payload_type = kVp9PayloadType;
+ ModifyVideoConfigsHook(send_config, receive_configs, encoder_config);
+ encoder_config->encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings_);
+ EXPECT_EQ(1u, encoder_config->number_of_streams);
+ EXPECT_EQ(1u, encoder_config->simulcast_layers.size());
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void ModifyVideoCaptureStartResolution(int* width,
+ int* height,
+ int* frame_rate) override {
+ expected_width_ = *width;
+ expected_height_ = *height;
+ }
+
+ void PerformTest() override {
+ bool wait = Wait();
+ {
+ // In case of a timeout, OnSendRtp might still access frames_sent_.
+ MutexLock lock(&mutex_);
+ EXPECT_TRUE(wait) << "Test timed out waiting for VP9 packet, num frames "
+ << frames_sent_;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_EQ(kVp9PayloadType, rtp_packet.PayloadType());
+ rtc::ArrayView<const uint8_t> rtp_payload = rtp_packet.payload();
+
+ bool new_packet = !last_packet_sequence_number_.has_value() ||
+ IsNewerSequenceNumber(rtp_packet.SequenceNumber(),
+ *last_packet_sequence_number_);
+ if (!rtp_payload.empty() && new_packet) {
+ RTPVideoHeader video_header;
+ EXPECT_NE(
+ VideoRtpDepacketizerVp9::ParseRtpPayload(rtp_payload, &video_header),
+ 0);
+ EXPECT_EQ(VideoCodecType::kVideoCodecVP9, video_header.codec);
+ // Verify common fields for all configurations.
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+ VerifyCommonHeader(vp9_header);
+ CompareConsecutiveFrames(rtp_packet, video_header);
+ // Verify configuration specific settings.
+ InspectHeader(vp9_header);
+
+ if (rtp_packet.Marker()) {
+ MutexLock lock(&mutex_);
+ ++frames_sent_;
+ }
+ last_packet_marker_ = rtp_packet.Marker();
+ last_packet_sequence_number_ = rtp_packet.SequenceNumber();
+ last_packet_timestamp_ = rtp_packet.Timestamp();
+ last_vp9_ = vp9_header;
+ last_temporal_idx_by_spatial_idx_[vp9_header.spatial_idx] =
+ vp9_header.temporal_idx;
+ }
+ return SEND_PACKET;
+ }
+
+ protected:
+ bool ContinuousPictureId(const RTPVideoHeaderVP9& vp9) const {
+ if (last_vp9_.picture_id > vp9.picture_id) {
+ return vp9.picture_id == 0; // Wrap.
+ } else {
+ return vp9.picture_id == last_vp9_.picture_id + 1;
+ }
+ }
+
+ bool IsTemporalShiftEnabled() const {
+ return params_.scalability_mode.find("_SHIFT") != std::string::npos;
+ }
+
+ void VerifySpatialIdxWithinFrame(const RTPVideoHeaderVP9& vp9) const {
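+ // Within one temporal unit, spatial layers must arrive in increasing
+ // order, each new layer beginning exactly where the previous one ended.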
+ bool new_layer = vp9.spatial_idx != last_vp9_.spatial_idx;
+ EXPECT_EQ(new_layer, vp9.beginning_of_frame);
+ EXPECT_EQ(new_layer, last_vp9_.end_of_frame);
+ EXPECT_EQ(new_layer ? last_vp9_.spatial_idx + 1 : last_vp9_.spatial_idx,
+ vp9.spatial_idx);
+ }
+
+ void VerifyTemporalIdxWithinFrame(const RTPVideoHeaderVP9& vp9) const {
+ if (!IsTemporalShiftEnabled()) {
+ EXPECT_EQ(vp9.temporal_idx, last_vp9_.temporal_idx);
+ return;
+ }
+ // Temporal shift.
+ EXPECT_EQ(params_.num_temporal_layers, 2);
+ if (vp9.spatial_idx == params_.num_spatial_layers - 1) {
+ // Lower spatial layers should be shifted.
+ int expected_tid =
+ (!vp9.inter_pic_predicted || vp9.temporal_idx == 1) ? 0 : 1;
+ for (int i = 0; i < vp9.spatial_idx; ++i) {
+ EXPECT_EQ(last_temporal_idx_by_spatial_idx_.at(i), expected_tid);
+ }
+ }
+ // Same within spatial layer.
+ bool new_layer = vp9.spatial_idx != last_vp9_.spatial_idx;
+ if (!new_layer) {
+ EXPECT_EQ(vp9.temporal_idx, last_vp9_.temporal_idx);
+ }
+ }
+
+ void VerifyFixedTemporalLayerStructure(const RTPVideoHeaderVP9& vp9,
+ uint8_t num_layers) const {
+ switch (num_layers) {
+ case 0:
+ VerifyTemporalLayerStructure0(vp9);
+ break;
+ case 1:
+ VerifyTemporalLayerStructure1(vp9);
+ break;
+ case 2:
+ VerifyTemporalLayerStructure2(vp9);
+ break;
+ case 3:
+ VerifyTemporalLayerStructure3(vp9);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+
+ void VerifyTemporalLayerStructure0(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_EQ(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_EQ(kNoTemporalIdx, vp9.temporal_idx); // no tid
+ // Technically true, but layer indices not available.
+ EXPECT_FALSE(vp9.temporal_up_switch);
+ }
+
+ void VerifyTemporalLayerStructure1(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_EQ(0, vp9.temporal_idx); // 0,0,0,...
+ }
+
+ void VerifyTemporalLayerStructure2(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_GE(vp9.temporal_idx, 0); // 0,1,0,1,... (tid reset on I-frames).
+ EXPECT_LE(vp9.temporal_idx, 1);
+ EXPECT_TRUE(vp9.temporal_up_switch);
+ // Verify the temporal structure for the highest spatial layer (the
+ // structure may be shifted for lower spatial layers if temporal shift is
+ // configured).
+ if (IsHighestSpatialLayer(vp9) && vp9.beginning_of_frame) {
+ int expected_tid =
+ (!vp9.inter_pic_predicted ||
+ last_temporal_idx_by_spatial_idx_.at(vp9.spatial_idx) == 1)
+ ? 0
+ : 1;
+ EXPECT_EQ(vp9.temporal_idx, expected_tid);
+ }
+ }
+
+ void VerifyTemporalLayerStructure3(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_GE(vp9.temporal_idx, 0); // 0,2,1,2,... (tid reset on I-frames).
+ EXPECT_LE(vp9.temporal_idx, 2);
+ if (IsNewPictureId(vp9) && vp9.inter_pic_predicted) {
+ EXPECT_NE(vp9.temporal_idx, last_vp9_.temporal_idx);
+ EXPECT_TRUE(vp9.temporal_up_switch);
+ switch (vp9.temporal_idx) {
+ case 0:
+ EXPECT_EQ(last_vp9_.temporal_idx, 2);
+ break;
+ case 1:
+ EXPECT_EQ(last_vp9_.temporal_idx, 2);
+ break;
+ case 2:
+ EXPECT_LT(last_vp9_.temporal_idx, 2);
+ break;
+ }
+ }
+ }
+
+ void VerifyTl0Idx(const RTPVideoHeaderVP9& vp9) const {
+ if (vp9.tl0_pic_idx == kNoTl0PicIdx)
+ return;
+
+ uint8_t expected_tl0_idx = last_vp9_.tl0_pic_idx;
+ if (vp9.temporal_idx == 0)
+ ++expected_tl0_idx;
+ EXPECT_EQ(expected_tl0_idx, vp9.tl0_pic_idx);
+ }
+
+ bool IsNewPictureId(const RTPVideoHeaderVP9& vp9) const {
+ return frames_sent_ > 0 && (vp9.picture_id != last_vp9_.picture_id);
+ }
+
+ bool IsHighestSpatialLayer(const RTPVideoHeaderVP9& vp9) const {
+ return vp9.spatial_idx == params_.num_spatial_layers - 1 ||
+ vp9.spatial_idx == kNoSpatialIdx;
+ }
+
+ // Flexible mode (F=1): Non-flexible mode (F=0):
+ //
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // |I|P|L|F|B|E|V|-| |I|P|L|F|B|E|V|-|
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // I: |M| PICTURE ID | I: |M| PICTURE ID |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // M: | EXTENDED PID | M: | EXTENDED PID |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // L: | T |U| S |D| L: | T |U| S |D|
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // P,F: | P_DIFF |X|N| | TL0PICIDX |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // X: |EXTENDED P_DIFF| V: | SS .. |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // V: | SS .. |
+ // +-+-+-+-+-+-+-+-+
+ void VerifyCommonHeader(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_EQ(kMaxTwoBytePictureId, vp9.max_picture_id); // M:1
+ EXPECT_NE(kNoPictureId, vp9.picture_id); // I:1
+ EXPECT_EQ(vp9_settings_.flexibleMode, vp9.flexible_mode); // F
+
+ if (params_.num_spatial_layers > 1) {
+ EXPECT_LT(vp9.spatial_idx, params_.num_spatial_layers);
+ } else if (params_.num_temporal_layers > 1) {
+ EXPECT_EQ(vp9.spatial_idx, 0);
+ } else {
+ EXPECT_EQ(vp9.spatial_idx, kNoSpatialIdx);
+ }
+
+ if (params_.num_temporal_layers > 1) {
+ EXPECT_LT(vp9.temporal_idx, params_.num_temporal_layers);
+ } else if (params_.num_spatial_layers > 1) {
+ EXPECT_EQ(vp9.temporal_idx, 0);
+ } else {
+ EXPECT_EQ(vp9.temporal_idx, kNoTemporalIdx);
+ }
+
+ if (vp9.ss_data_available) // V
+ VerifySsData(vp9);
+
+ if (frames_sent_ == 0)
+ EXPECT_FALSE(vp9.inter_pic_predicted); // P
+
+ if (!vp9.inter_pic_predicted) {
+ if (vp9.temporal_idx == kNoTemporalIdx) {
+ EXPECT_FALSE(vp9.temporal_up_switch);
+ } else {
+ EXPECT_EQ(vp9.temporal_idx, 0);
+ EXPECT_TRUE(vp9.temporal_up_switch);
+ }
+ }
+ }
+
+ // Scalability structure (SS).
+ //
+ // +-+-+-+-+-+-+-+-+
+ // V: | N_S |Y|G|-|-|-|
+ // +-+-+-+-+-+-+-+-+
+ // Y: | WIDTH | N_S + 1 times
+ // +-+-+-+-+-+-+-+-+
+ // | HEIGHT |
+ // +-+-+-+-+-+-+-+-+
+ // G: | N_G |
+ // +-+-+-+-+-+-+-+-+
+ // N_G: | T |U| R |-|-| N_G times
+ // +-+-+-+-+-+-+-+-+
+ // | P_DIFF | R times
+ // +-+-+-+-+-+-+-+-+
+ void VerifySsData(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_TRUE(vp9.ss_data_available); // V
+ EXPECT_EQ(params_.num_spatial_layers, // N_S + 1
+ vp9.num_spatial_layers);
+ EXPECT_TRUE(vp9.spatial_layer_resolution_present); // Y:1
+
+ ScalableVideoController::StreamLayersConfig config = GetScalabilityConfig();
+ for (int i = config.num_spatial_layers - 1; i >= 0; --i) {
+ double ratio = static_cast<double>(config.scaling_factor_num[i]) /
+ config.scaling_factor_den[i];
+ EXPECT_EQ(expected_width_ * ratio, vp9.width[i]); // WIDTH
+ EXPECT_EQ(expected_height_ * ratio, vp9.height[i]); // HEIGHT
+ }
+ }
+
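+ // Checks packetization invariants between this packet and the previous one:
+ // marker bit, timestamp, picture id and layer indices.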
+ void CompareConsecutiveFrames(const RtpPacket& rtp_packet,
+ const RTPVideoHeader& video) const {
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(video.video_type_header);
+
+ const bool new_temporal_unit =
+ !last_packet_timestamp_.has_value() ||
+ IsNewerTimestamp(rtp_packet.Timestamp(), *last_packet_timestamp_);
+ const bool new_frame =
+ new_temporal_unit || last_vp9_.spatial_idx != vp9_header.spatial_idx;
+
+ EXPECT_EQ(new_frame, video.is_first_packet_in_frame);
+ if (!new_temporal_unit) {
+ EXPECT_FALSE(last_packet_marker_);
+ EXPECT_EQ(*last_packet_timestamp_, rtp_packet.Timestamp());
+ EXPECT_EQ(last_vp9_.picture_id, vp9_header.picture_id);
+ EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9_header.tl0_pic_idx);
+ VerifySpatialIdxWithinFrame(vp9_header);
+ VerifyTemporalIdxWithinFrame(vp9_header);
+ return;
+ }
+ // New frame.
+ EXPECT_TRUE(vp9_header.beginning_of_frame);
+
+ // Compare with last packet in previous frame.
+ if (frames_sent_ == 0)
+ return;
+ EXPECT_TRUE(last_vp9_.end_of_frame);
+ EXPECT_TRUE(last_packet_marker_);
+ EXPECT_TRUE(ContinuousPictureId(vp9_header));
+ VerifyTl0Idx(vp9_header);
+ }
+
+ ScalableVideoController::StreamLayersConfig GetScalabilityConfig() const {
+ absl::optional<ScalabilityMode> scalability_mode =
+ ScalabilityModeFromString(params_.scalability_mode);
+ EXPECT_TRUE(scalability_mode.has_value());
+ absl::optional<ScalableVideoController::StreamLayersConfig> config =
+ ScalabilityStructureConfig(*scalability_mode);
+ EXPECT_TRUE(config.has_value());
+ EXPECT_EQ(config->num_spatial_layers, params_.num_spatial_layers);
+ return *config;
+ }
+
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ const Vp9TestParams params_;
+ VideoCodecVP9 vp9_settings_;
+ webrtc::VideoEncoderConfig encoder_config_;
+ bool last_packet_marker_ = false;
+ absl::optional<uint16_t> last_packet_sequence_number_;
+ absl::optional<uint32_t> last_packet_timestamp_;
+ RTPVideoHeaderVP9 last_vp9_;
+ std::map<int, int> last_temporal_idx_by_spatial_idx_;
+ Mutex mutex_;
+ size_t frames_sent_ = 0;
+ int expected_width_ = 0;
+ int expected_height_ = 0;
+};
+
+class Vp9Test : public VideoSendStreamTest,
+ public ::testing::WithParamInterface<ParameterizationType> {
+ public:
+ Vp9Test()
+ : params_(::testing::get<Vp9TestParams>(GetParam())),
+ use_scalability_mode_identifier_(::testing::get<bool>(GetParam())) {}
+
+ protected:
+ const Vp9TestParams params_;
+ const bool use_scalability_mode_identifier_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ ScalabilityMode,
+ Vp9Test,
+ ::testing::Combine(
+ ::testing::ValuesIn<Vp9TestParams>(
+ {{"L1T1", 1, 1, InterLayerPredMode::kOn},
+ {"L1T2", 1, 2, InterLayerPredMode::kOn},
+ {"L1T3", 1, 3, InterLayerPredMode::kOn},
+ {"L2T1", 2, 1, InterLayerPredMode::kOn},
+ {"L2T1_KEY", 2, 1, InterLayerPredMode::kOnKeyPic},
+ {"L2T2", 2, 2, InterLayerPredMode::kOn},
+ {"L2T2_KEY", 2, 2, InterLayerPredMode::kOnKeyPic},
+ {"L2T3", 2, 3, InterLayerPredMode::kOn},
+ {"L2T3_KEY", 2, 3, InterLayerPredMode::kOnKeyPic},
+ {"L3T1", 3, 1, InterLayerPredMode::kOn},
+ {"L3T1_KEY", 3, 1, InterLayerPredMode::kOnKeyPic},
+ {"L3T2", 3, 2, InterLayerPredMode::kOn},
+ {"L3T2_KEY", 3, 2, InterLayerPredMode::kOnKeyPic},
+ {"L3T3", 3, 3, InterLayerPredMode::kOn},
+ {"L3T3_KEY", 3, 3, InterLayerPredMode::kOnKeyPic},
+ {"S2T1", 2, 1, InterLayerPredMode::kOff},
+ {"S2T2", 2, 2, InterLayerPredMode::kOff},
+ {"S2T3", 2, 3, InterLayerPredMode::kOff},
+ {"S3T1", 3, 1, InterLayerPredMode::kOff},
+ {"S3T2", 3, 2, InterLayerPredMode::kOff},
+ {"S3T3", 3, 3, InterLayerPredMode::kOff}}),
+ ::testing::Values(false, true)), // use_scalability_mode_identifier
+ ParamInfoToStr);
+
+INSTANTIATE_TEST_SUITE_P(
+ ScalabilityModeOn,
+ Vp9Test,
+ ::testing::Combine(
+ ::testing::ValuesIn<Vp9TestParams>(
+ {{"L2T1h", 2, 1, InterLayerPredMode::kOn},
+ {"L2T2h", 2, 2, InterLayerPredMode::kOn},
+ {"L2T3h", 2, 3, InterLayerPredMode::kOn},
+ {"L2T2_KEY_SHIFT", 2, 2, InterLayerPredMode::kOnKeyPic},
+ {"L3T1h", 3, 1, InterLayerPredMode::kOn},
+ {"L3T2h", 3, 2, InterLayerPredMode::kOn},
+ {"L3T3h", 3, 3, InterLayerPredMode::kOn},
+ {"S2T1h", 2, 1, InterLayerPredMode::kOff},
+ {"S2T2h", 2, 2, InterLayerPredMode::kOff},
+ {"S2T3h", 2, 3, InterLayerPredMode::kOff},
+ {"S3T1h", 3, 1, InterLayerPredMode::kOff},
+ {"S3T2h", 3, 2, InterLayerPredMode::kOff},
+ {"S3T3h", 3, 3, InterLayerPredMode::kOff}}),
+ ::testing::Values(true)), // use_scalability_mode_identifier
+ ParamInfoToStr);
+
+TEST_P(Vp9Test, NonFlexMode) {
+ TestVp9NonFlexMode(params_, use_scalability_mode_identifier_);
+}
+
+void VideoSendStreamTest::TestVp9NonFlexMode(
+ const Vp9TestParams& params,
+ bool use_scalability_mode_identifier) {
+ static const size_t kNumFramesToSend = 100;
+ // Set to < kNumFramesToSend and coprime to the length of the temporal
+ // layer structures, to verify that the temporal id is reset on key frames.
+ static const int kKeyFrameInterval = 31;
+
+ static const int kWidth = kMinVp9SpatialLayerLongSideLength;
+ static const int kHeight = kMinVp9SpatialLayerShortSideLength;
+ static const float kGoodBitsPerPixel = 0.1f;
+ class NonFlexibleMode : public Vp9HeaderObserver {
+ public:
+ NonFlexibleMode(const Vp9TestParams& params,
+ bool use_scalability_mode_identifier)
+ : Vp9HeaderObserver(params),
+ use_scalability_mode_identifier_(use_scalability_mode_identifier),
+ l_field_(params.num_temporal_layers > 1 ||
+ params.num_spatial_layers > 1) {}
+
+ void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->codec_type = kVideoCodecVP9;
+ int bitrate_bps = 0;
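+ // Each successive spatial layer has 4x the pixels (width and height are
+ // doubled) but half the bits-per-pixel, so its target bitrate is 2x that
+ // of the layer below.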
+ for (int sl_idx = 0; sl_idx < params_.num_spatial_layers; ++sl_idx) {
+ const int width = kWidth << sl_idx;
+ const int height = kHeight << sl_idx;
+ const float bpp = kGoodBitsPerPixel / (1 << sl_idx);
+ bitrate_bps += static_cast<int>(width * height * bpp * 30);
+ }
+ encoder_config->max_bitrate_bps = bitrate_bps * 2;
+
+ encoder_config->frame_drop_enabled = false;
+
+ vp9_settings_.flexibleMode = false;
+ vp9_settings_.automaticResizeOn = false;
+ vp9_settings_.keyFrameInterval = kKeyFrameInterval;
+ if (!use_scalability_mode_identifier_) {
+ vp9_settings_.numberOfTemporalLayers = params_.num_temporal_layers;
+ vp9_settings_.numberOfSpatialLayers = params_.num_spatial_layers;
+ vp9_settings_.interLayerPred = params_.inter_layer_pred;
+ } else {
+ absl::optional<ScalabilityMode> mode =
+ ScalabilityModeFromString(params_.scalability_mode);
+ encoder_config->simulcast_layers[0].scalability_mode = mode;
+ EXPECT_TRUE(mode.has_value());
+ }
+ }
+
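+ // Example: if three spatial layers are scaled by 1/4, 1/2 and 1/1, the
+ // scaling denominators are {4, 2, 1}, so the required divisibility is
+ // lcm(4, 2, 1) = 4.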
+ int GetRequiredDivisibility() const {
+ ScalableVideoController::StreamLayersConfig config =
+ GetScalabilityConfig();
+ int required_divisibility = 1;
+ for (int sl_idx = 0; sl_idx < config.num_spatial_layers; ++sl_idx) {
+ required_divisibility = cricket::LeastCommonMultiple(
+ required_divisibility, config.scaling_factor_den[sl_idx]);
+ }
+ return required_divisibility;
+ }
+
+ void ModifyVideoCaptureStartResolution(int* width,
+ int* height,
+ int* frame_rate) override {
+ expected_width_ = kWidth << (params_.num_spatial_layers - 1);
+ expected_height_ = kHeight << (params_.num_spatial_layers - 1);
+ *width = expected_width_;
+ *height = expected_height_;
+ // Top layer may be adjusted to ensure evenly divided layers.
+ int divisibility = GetRequiredDivisibility();
+ expected_width_ -= (expected_width_ % divisibility);
+ expected_height_ -= (expected_height_ % divisibility);
+ }
+
+ void InspectHeader(const RTPVideoHeaderVP9& vp9) override {
+ bool ss_data_expected = !vp9.inter_pic_predicted &&
+ vp9.beginning_of_frame &&
+ !vp9.inter_layer_predicted;
+ EXPECT_EQ(ss_data_expected, vp9.ss_data_available);
+
+ bool is_key_frame = frames_sent_ % kKeyFrameInterval == 0;
+ if (params_.num_spatial_layers > 1) {
+ switch (params_.inter_layer_pred) {
+ case InterLayerPredMode::kOff:
+ EXPECT_FALSE(vp9.inter_layer_predicted);
+ break;
+ case InterLayerPredMode::kOn:
+ EXPECT_EQ(vp9.spatial_idx > 0, vp9.inter_layer_predicted);
+ break;
+ case InterLayerPredMode::kOnKeyPic:
+ EXPECT_EQ(is_key_frame && vp9.spatial_idx > 0,
+ vp9.inter_layer_predicted);
+ break;
+ }
+ } else {
+ EXPECT_FALSE(vp9.inter_layer_predicted);
+ }
+
+ EXPECT_EQ(is_key_frame, !vp9.inter_pic_predicted);
+
+ if (IsNewPictureId(vp9)) {
+ if (params_.num_temporal_layers == 1 &&
+ params_.num_spatial_layers == 1) {
+ EXPECT_EQ(kNoSpatialIdx, vp9.spatial_idx);
+ } else {
+ EXPECT_EQ(0, vp9.spatial_idx);
+ }
+ if (params_.num_spatial_layers > 1)
+ EXPECT_EQ(params_.num_spatial_layers - 1, last_vp9_.spatial_idx);
+ }
+
+ VerifyFixedTemporalLayerStructure(
+ vp9, l_field_ ? params_.num_temporal_layers : 0);
+
+ if (frames_sent_ > kNumFramesToSend)
+ observation_complete_.Set();
+ }
+ const bool use_scalability_mode_identifier_;
+ const bool l_field_;
+
+ private:
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ const int kBitrateBps = 800000;
+ bitrate_config->min_bitrate_bps = kBitrateBps;
+ bitrate_config->start_bitrate_bps = kBitrateBps;
+ }
+ } test(params, use_scalability_mode_identifier);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexModeSmallResolution) {
+ static const size_t kNumFramesToSend = 50;
+ static const int kWidth = 4;
+ static const int kHeight = 4;
+ class NonFlexibleModeResolution : public Vp9HeaderObserver {
+ public:
+ explicit NonFlexibleModeResolution(const Vp9TestParams& params)
+ : Vp9HeaderObserver(params) {}
+
+ private:
+ void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->codec_type = kVideoCodecVP9;
+ vp9_settings_.flexibleMode = false;
+ vp9_settings_.numberOfTemporalLayers = params_.num_temporal_layers;
+ vp9_settings_.numberOfSpatialLayers = params_.num_spatial_layers;
+ vp9_settings_.interLayerPred = params_.inter_layer_pred;
+ }
+
+ void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override {
+ if (frames_sent_ > kNumFramesToSend)
+ observation_complete_.Set();
+ }
+
+ void ModifyVideoCaptureStartResolution(int* width,
+ int* height,
+ int* frame_rate) override {
+ expected_width_ = kWidth;
+ expected_height_ = kHeight;
+ *width = kWidth;
+ *height = kHeight;
+ }
+ };
+
+ Vp9TestParams params{"L1T1", 1, 1, InterLayerPredMode::kOn};
+ NonFlexibleModeResolution test(params);
+
+ RunBaseTest(&test);
+}
+
+#if defined(WEBRTC_ANDROID)
+// Crashes on Android; bugs.webrtc.org/7401
+#define MAYBE_Vp9FlexModeRefCount DISABLED_Vp9FlexModeRefCount
+#else
+// TODO(webrtc:9270): Support for flexible mode is temporarily disabled.
+// Enable the test after webrtc:9270 is implemented.
+#define MAYBE_Vp9FlexModeRefCount DISABLED_Vp9FlexModeRefCount
+// #define MAYBE_Vp9FlexModeRefCount Vp9FlexModeRefCount
+#endif
+TEST_F(VideoSendStreamTest, MAYBE_Vp9FlexModeRefCount) {
+ class FlexibleMode : public Vp9HeaderObserver {
+ public:
+ explicit FlexibleMode(const Vp9TestParams& params)
+ : Vp9HeaderObserver(params) {}
+
+ private:
+ void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->codec_type = kVideoCodecVP9;
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ vp9_settings_.flexibleMode = true;
+ vp9_settings_.numberOfTemporalLayers = params_.num_temporal_layers;
+ vp9_settings_.numberOfSpatialLayers = params_.num_spatial_layers;
+ vp9_settings_.interLayerPred = params_.inter_layer_pred;
+ }
+
+ void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override {
+ EXPECT_TRUE(vp9_header.flexible_mode);
+ EXPECT_EQ(kNoTl0PicIdx, vp9_header.tl0_pic_idx);
+ if (vp9_header.inter_pic_predicted) {
+ EXPECT_GT(vp9_header.num_ref_pics, 0u);
+ observation_complete_.Set();
+ }
+ }
+ };
+
+ Vp9TestParams params{"L2T1", 2, 1, InterLayerPredMode::kOn};
+ FlexibleMode test(params);
+
+ RunBaseTest(&test);
+}
+#endif // defined(RTC_ENABLE_VP9)
+
+void VideoSendStreamTest::TestRequestSourceRotateVideo(
+ bool support_orientation_ext) {
+ CreateSenderCall();
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ GetVideoSendConfig()->rtp.extensions.clear();
+ if (support_orientation_ext) {
+ GetVideoSendConfig()->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoRotationUri, 1));
+ }
+
+ CreateVideoStreams();
+ test::FrameForwarder forwarder;
+ GetVideoSendStream()->SetSource(&forwarder,
+ DegradationPreference::MAINTAIN_FRAMERATE);
+
+ EXPECT_TRUE(forwarder.sink_wants().rotation_applied !=
+ support_orientation_ext);
+
+ DestroyStreams();
+}
+
+TEST_F(VideoSendStreamTest,
+ RequestSourceRotateIfVideoOrientationExtensionNotSupported) {
+ TestRequestSourceRotateVideo(false);
+}
+
+TEST_F(VideoSendStreamTest,
+ DoNotRequestRotationIfVideoOrientationExtensionSupported) {
+ TestRequestSourceRotateVideo(true);
+}
+
+TEST_F(VideoSendStreamTest, EncoderConfigMaxFramerateReportedToSource) {
+ static const int kMaxFps = 22;
+ class FpsObserver : public test::SendTest,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ FpsObserver() : SendTest(kDefaultTimeout) {}
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetSinkWantsObserver(this);
+ }
+
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ if (wants.max_framerate_fps == kMaxFps)
+ observation_complete_.Set();
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->simulcast_layers[0].max_framerate = kMaxFps;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for fps to be reported.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+// This test verifies that overhead is removed from the bandwidth estimate by
+// checking that the maximum possible target payload rate is at most the
+// maximum bandwidth estimate minus the overhead rate.
+TEST_F(VideoSendStreamTest, RemoveOverheadFromBandwidth) {
+ class RemoveOverheadFromBandwidthTest : public test::EndToEndTest,
+ public test::FakeEncoder {
+ public:
+ explicit RemoveOverheadFromBandwidthTest(TaskQueueBase* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ encoder_factory_(this),
+ call_(nullptr),
+ max_bitrate_bps_(0),
+ first_packet_sent_(false) {}
+
+ void SetRates(const RateControlParameters& parameters) override {
+ MutexLock lock(&mutex_);
+ // Wait for the first sent packet so that the VideoSendStream knows the
+ // rtp_overhead.
+ if (first_packet_sent_) {
+ max_bitrate_bps_ = parameters.bitrate.get_sum_bps();
+ bitrate_changed_event_.Set();
+ }
+ return FakeEncoder::SetRates(parameters);
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.max_packet_size = 1200;
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ EXPECT_FALSE(send_config->rtp.extensions.empty());
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ MutexLock lock(&mutex_);
+ first_packet_sent_ = true;
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ BitrateConstraints bitrate_config;
+ constexpr int kStartBitrateBps = 60000;
+ constexpr int kMaxBitrateBps = 60000;
+ constexpr int kMinBitrateBps = 10000;
+ bitrate_config.start_bitrate_bps = kStartBitrateBps;
+ bitrate_config.max_bitrate_bps = kMaxBitrateBps;
+ bitrate_config.min_bitrate_bps = kMinBitrateBps;
+ SendTask(task_queue_, [this, &bitrate_config]() {
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ call_->GetTransportControllerSend()->OnTransportOverheadChanged(40);
+ });
+
+ // At a bitrate of 60 kbps, with a packet size of 1200 bytes and an
+ // overhead of 40 bytes per packet, video produces 2240 bps of overhead.
+ // The encoder bandwidth should therefore be set to 57760 bps.
+ EXPECT_TRUE(
+ bitrate_changed_event_.Wait(VideoSendStreamTest::kDefaultTimeout));
+ {
+ MutexLock lock(&mutex_);
+ EXPECT_LE(max_bitrate_bps_, 57760u);
+ }
+ }
+
+ private:
+ TaskQueueBase* const task_queue_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ Call* call_;
+ Mutex mutex_;
+ uint32_t max_bitrate_bps_ RTC_GUARDED_BY(&mutex_);
+ bool first_packet_sent_ RTC_GUARDED_BY(&mutex_);
+ rtc::Event bitrate_changed_event_;
+ } test(task_queue());
+ RunBaseTest(&test);
+}
+
+class PacingFactorObserver : public test::SendTest {
+ public:
+ PacingFactorObserver(bool configure_send_side,
+ absl::optional<float> expected_pacing_factor)
+ : test::SendTest(VideoSendStreamTest::kDefaultTimeout),
+ configure_send_side_(configure_send_side),
+ expected_pacing_factor_(expected_pacing_factor) {}
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Check if send-side bwe extension is already present, and remove it if
+ // it is not desired.
+ bool has_send_side = false;
+ for (auto it = send_config->rtp.extensions.begin();
+ it != send_config->rtp.extensions.end(); ++it) {
+ if (it->uri == RtpExtension::kTransportSequenceNumberUri) {
+ if (configure_send_side_) {
+ has_send_side = true;
+ } else {
+ send_config->rtp.extensions.erase(it);
+ }
+ break;
+ }
+ }
+
+ if (configure_send_side_ && !has_send_side) {
+ rtc::UniqueNumberGenerator<int> unique_id_generator;
+ unique_id_generator.AddKnownId(0); // First valid RTP extension ID is 1.
+ for (const RtpExtension& extension : send_config->rtp.extensions) {
+ unique_id_generator.AddKnownId(extension.id);
+ }
+ // Want send side, not present by default, so add it.
+ send_config->rtp.extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri, unique_id_generator());
+ }
+
+ // ALR is only enabled for screenshare.
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ auto internal_send_peer = test::VideoSendStreamPeer(send_stream);
+ // Video streams created, check that pacing factor is correctly configured.
+ EXPECT_EQ(expected_pacing_factor_,
+ internal_send_peer.GetPacingFactorOverride());
+ observation_complete_.Set();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for stream creation.";
+ }
+
+ private:
+ const bool configure_send_side_;
+ const absl::optional<float> expected_pacing_factor_;
+};
+
+std::string GetAlrProbingExperimentString() {
+ return std::string(
+ AlrExperimentSettings::kScreenshareProbingBweExperimentName) +
+ "/1.0,2875,80,40,-60,3/";
+}
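+// The first field in the experiment string above is the pacing factor (1.0);
+// it must match kAlrProbingExperimentPaceMultiplier below. The remaining
+// fields are assumed to configure the ALR probing behavior (see
+// AlrExperimentSettings).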
+const float kAlrProbingExperimentPaceMultiplier = 1.0f;
+
+TEST_F(VideoSendStreamTest, AlrConfiguredWhenSendSideOn) {
+ test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString());
+ // Send-side bwe on; the pacing factor from the ALR probing experiment
+ // configured above (kAlrProbingExperimentPaceMultiplier) should be used.
+ PacingFactorObserver test_with_send_side(true,
+ kAlrProbingExperimentPaceMultiplier);
+ RunBaseTest(&test_with_send_side);
+}
+
+TEST_F(VideoSendStreamTest, AlrNotConfiguredWhenSendSideOff) {
+ test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString());
+ // Send-side bwe off; the pacing factor configuration should not be
+ // overridden.
+ PacingFactorObserver test_without_send_side(false, absl::nullopt);
+ RunBaseTest(&test_without_send_side);
+}
+
+// Test class takes as argument a function pointer to reset the send
+// stream and call OnVideoStreamsCreated. This is necessary since the content
+// type of a VideoSendStream cannot be changed; the stream has to be
+// recreated. Stopping and recreating the stream can only be done on the main
+// thread and in the context of VideoSendStreamTest (not BaseTest). The test
+// switches from realtime to screenshare and back.
+template <typename T>
+class ContentSwitchTest : public test::SendTest {
+ public:
+ enum class StreamState {
+ kBeforeSwitch = 0,
+ kInScreenshare = 1,
+ kAfterSwitchBack = 2,
+ };
+ static const uint32_t kMinPacketsToSend = 50;
+
+ explicit ContentSwitchTest(T* stream_reset_fun, TaskQueueBase* task_queue)
+ : SendTest(test::CallTest::kDefaultTimeout),
+ call_(nullptr),
+ state_(StreamState::kBeforeSwitch),
+ send_stream_(nullptr),
+ send_stream_config_(nullptr),
+ packets_sent_(0),
+ stream_resetter_(stream_reset_fun),
+ task_queue_(task_queue) {
+ RTC_DCHECK(stream_resetter_);
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ MutexLock lock(&mutex_);
+ send_stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ RTC_DCHECK_EQ(1, encoder_config->number_of_streams);
+ encoder_config->min_transmit_bitrate_bps = 0;
+ encoder_config->content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ send_stream_config_ = send_config->Copy();
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ void OnStreamsStopped() override {
+ MutexLock lock(&mutex_);
+ done_ = true;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ task_queue_->PostTask([this]() {
+ MutexLock lock(&mutex_);
+ if (done_)
+ return;
+
+ auto internal_send_peer = test::VideoSendStreamPeer(send_stream_);
+ float pacing_factor =
+ internal_send_peer.GetPacingFactorOverride().value_or(0.0f);
+ float expected_pacing_factor = 1.1f;  // Strict pacing factor.
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+ if (stats.content_type == webrtc::VideoContentType::SCREENSHARE) {
+ expected_pacing_factor = 1.0f; // Currently used pacing factor in ALR.
+ }
+
+ EXPECT_NEAR(expected_pacing_factor, pacing_factor, 1e-6);
+
+ // Wait until at least kMinPacketsToSend packets have been sent, so that
+ // some frames have been encoded.
+ if (++packets_sent_ < kMinPacketsToSend)
+ return;
+
+ if (state_ != StreamState::kAfterSwitchBack) {
+ // We've sent kMinPacketsToSend packets, switch the content type and
+ // move to the next state. Note that we need to recreate the stream
+ // if changing content type.
+ packets_sent_ = 0;
+ if (encoder_config_.content_type ==
+ VideoEncoderConfig::ContentType::kRealtimeVideo) {
+ encoder_config_.content_type =
+ VideoEncoderConfig::ContentType::kScreen;
+ } else {
+ encoder_config_.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ }
+ switch (state_) {
+ case StreamState::kBeforeSwitch:
+ state_ = StreamState::kInScreenshare;
+ break;
+ case StreamState::kInScreenshare:
+ state_ = StreamState::kAfterSwitchBack;
+ break;
+ case StreamState::kAfterSwitchBack:
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+ content_switch_event_.Set();
+ return;
+ }
+ observation_complete_.Set();
+ });
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ while (GetStreamState() != StreamState::kAfterSwitchBack) {
+ ASSERT_TRUE(content_switch_event_.Wait(test::CallTest::kDefaultTimeout));
+ (*stream_resetter_)(send_stream_config_, encoder_config_, this);
+ }
+
+ ASSERT_TRUE(Wait())
+ << "Timed out waiting for a frame sent after switch back";
+ }
+
+ private:
+ StreamState GetStreamState() {
+ MutexLock lock(&mutex_);
+ return state_;
+ }
+
+ Mutex mutex_;
+ rtc::Event content_switch_event_;
+ Call* call_;
+ bool done_ RTC_GUARDED_BY(mutex_) = false;
+ StreamState state_ RTC_GUARDED_BY(mutex_);
+ VideoSendStream* send_stream_ RTC_GUARDED_BY(mutex_);
+ VideoSendStream::Config send_stream_config_;
+ VideoEncoderConfig encoder_config_;
+ uint32_t packets_sent_ RTC_GUARDED_BY(mutex_);
+ T* stream_resetter_;
+ TaskQueueBase* task_queue_;
+};
+
+TEST_F(VideoSendStreamTest, SwitchesToScreenshareAndBack) {
+ auto reset_fun = [this](const VideoSendStream::Config& send_stream_config,
+ const VideoEncoderConfig& encoder_config,
+ test::BaseTest* test) {
+ SendTask(task_queue(),
+ [this, &send_stream_config, &encoder_config, &test]() {
+ Stop();
+ DestroyVideoSendStreams();
+ SetVideoSendConfig(send_stream_config);
+ SetVideoEncoderConfig(encoder_config);
+ CreateVideoSendStreams();
+ SetVideoDegradation(DegradationPreference::MAINTAIN_RESOLUTION);
+ test->OnVideoStreamsCreated(GetVideoSendStream(),
+ video_receive_streams_);
+ Start();
+ });
+ };
+ ContentSwitchTest<decltype(reset_fun)> test(&reset_fun, task_queue());
+ RunBaseTest(&test);
+}
+
+void VideoSendStreamTest::TestTemporalLayers(
+ VideoEncoderFactory* encoder_factory,
+ const std::string& payload_name,
+ const std::vector<int>& num_temporal_layers,
+ const std::vector<ScalabilityMode>& scalability_mode) {
+ static constexpr int kMaxBitrateBps = 1000000;
+ static constexpr int kMinFramesToObservePerStream = 8;
+
+ class TemporalLayerObserver
+ : public test::EndToEndTest,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ TemporalLayerObserver(VideoEncoderFactory* encoder_factory,
+ const std::string& payload_name,
+ const std::vector<int>& num_temporal_layers,
+ const std::vector<ScalabilityMode>& scalability_mode)
+ : EndToEndTest(kDefaultTimeout),
+ encoder_factory_(encoder_factory),
+ payload_name_(payload_name),
+ num_temporal_layers_(num_temporal_layers),
+ scalability_mode_(scalability_mode),
+ depacketizer_(CreateVideoRtpDepacketizer(
+ PayloadStringToCodecType(payload_name))) {}
+
+ private:
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->ChangeResolution(640, 360);
+ }
+
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {}
+
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ bitrate_config->start_bitrate_bps = kMaxBitrateBps / 2;
+ }
+
+ size_t GetNumVideoStreams() const override {
+ if (scalability_mode_.empty()) {
+ return num_temporal_layers_.size();
+ } else {
+ return scalability_mode_.size();
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ send_config->encoder_settings.encoder_factory = encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType;
+ encoder_config->video_format.name = payload_name_;
+ encoder_config->codec_type = PayloadStringToCodecType(payload_name_);
+ encoder_config->video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ payload_name_, /*max_qp=*/56, /*is_screenshare=*/false,
+ /*conference_mode=*/false, encoder_info);
+ encoder_config->max_bitrate_bps = kMaxBitrateBps;
+ if (absl::EqualsIgnoreCase(payload_name_, "VP9")) {
+ encoder_config->encoder_specific_settings = rtc::make_ref_counted<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ VideoEncoder::GetDefaultVp9Settings());
+ }
+ if (scalability_mode_.empty()) {
+ for (size_t i = 0; i < num_temporal_layers_.size(); ++i) {
+ VideoStream& stream = encoder_config->simulcast_layers[i];
+ stream.num_temporal_layers = num_temporal_layers_[i];
+ configured_num_temporal_layers_[send_config->rtp.ssrcs[i]] =
+ num_temporal_layers_[i];
+ }
+ } else {
+ for (size_t i = 0; i < scalability_mode_.size(); ++i) {
+ VideoStream& stream = encoder_config->simulcast_layers[i];
+ stream.scalability_mode = scalability_mode_[i];
+
+ configured_num_temporal_layers_[send_config->rtp.ssrcs[i]] =
+ ScalabilityModeToNumTemporalLayers(scalability_mode_[i]);
+ }
+ }
+ }
+
+ struct ParsedPacket {
+ uint32_t timestamp;
+ uint32_t ssrc;
+ int temporal_idx;
+ };
+
+ bool ParsePayload(const uint8_t* packet,
+ size_t length,
+ ParsedPacket& parsed) const {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ if (rtp_packet.payload_size() == 0) {
+ return false; // Padding packet.
+ }
+ parsed.timestamp = rtp_packet.Timestamp();
+ parsed.ssrc = rtp_packet.Ssrc();
+
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
+ depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ EXPECT_TRUE(parsed_payload);
+
+ if (const auto* vp8_header = absl::get_if<RTPVideoHeaderVP8>(
+ &parsed_payload->video_header.video_type_header)) {
+ parsed.temporal_idx = vp8_header->temporalIdx;
+ } else if (const auto* vp9_header = absl::get_if<RTPVideoHeaderVP9>(
+ &parsed_payload->video_header.video_type_header)) {
+ parsed.temporal_idx = vp9_header->temporal_idx;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ return true;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ ParsedPacket parsed;
+ if (!ParsePayload(packet, length, parsed))
+ return SEND_PACKET;
+
+ uint32_t ssrc = parsed.ssrc;
+ int temporal_idx =
+ parsed.temporal_idx == kNoTemporalIdx ? 0 : parsed.temporal_idx;
+ max_observed_tl_idxs_[ssrc] =
+ std::max(temporal_idx, max_observed_tl_idxs_[ssrc]);
+
+ if (last_observed_packet_.count(ssrc) == 0 ||
+ parsed.timestamp != last_observed_packet_[ssrc].timestamp) {
+ num_observed_frames_[ssrc]++;
+ }
+ last_observed_packet_[ssrc] = parsed;
+
+ if (HighestTemporalLayerSentPerStream())
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ bool HighestTemporalLayerSentPerStream() const {
+ if (num_observed_frames_.size() !=
+ configured_num_temporal_layers_.size()) {
+ return false;
+ }
+ for (const auto& num_frames : num_observed_frames_) {
+ if (num_frames.second < kMinFramesToObservePerStream) {
+ return false;
+ }
+ }
+ if (max_observed_tl_idxs_.size() !=
+ configured_num_temporal_layers_.size()) {
+ return false;
+ }
+ for (const auto& max_tl_idx : max_observed_tl_idxs_) {
+ uint32_t ssrc = max_tl_idx.first;
+ int configured_num_tls =
+ configured_num_temporal_layers_.find(ssrc)->second;
+ if (max_tl_idx.second != configured_num_tls - 1)
+ return false;
+ }
+ return true;
+ }
+
+ void PerformTest() override { EXPECT_TRUE(Wait()); }
+
+ VideoEncoderFactory* const encoder_factory_;
+ const std::string payload_name_;
+ const std::vector<int> num_temporal_layers_;
+ const std::vector<ScalabilityMode> scalability_mode_;
+ const std::unique_ptr<VideoRtpDepacketizer> depacketizer_;
+ // Mapped by SSRC.
+ std::map<uint32_t, int> configured_num_temporal_layers_;
+ std::map<uint32_t, int> max_observed_tl_idxs_;
+ std::map<uint32_t, int> num_observed_frames_;
+ std::map<uint32_t, ParsedPacket> last_observed_packet_;
+ } test(encoder_factory, payload_name, num_temporal_layers, scalability_mode);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, TestTemporalLayersVp8) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{2},
+ /*scalability_mode=*/{});
+}
+
+TEST_F(VideoSendStreamTest, TestTemporalLayersVp8Simulcast) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{2, 2},
+ /*scalability_mode=*/{});
+}
+
+TEST_F(VideoSendStreamTest, TestTemporalLayersVp8SimulcastWithDifferentNumTls) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{3, 1},
+ /*scalability_mode=*/{});
+}
+
+TEST_F(VideoSendStreamTest, TestTemporalLayersVp8SimulcastWithoutSimAdapter) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{2, 2},
+ /*scalability_mode=*/{});
+}
+
+TEST_F(VideoSendStreamTest, TestScalabilityModeVp8L1T2) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{}, {ScalabilityMode::kL1T2});
+}
+
+TEST_F(VideoSendStreamTest, TestScalabilityModeVp8Simulcast) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{},
+ {ScalabilityMode::kL1T2, ScalabilityMode::kL1T2});
+}
+
+TEST_F(VideoSendStreamTest, TestScalabilityModeVp8SimulcastWithDifferentMode) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{},
+ {ScalabilityMode::kL1T3, ScalabilityMode::kL1T1});
+}
+
+TEST_F(VideoSendStreamTest, TestScalabilityModeVp8SimulcastWithoutSimAdapter) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP8Encoder::Create(); });
+
+ TestTemporalLayers(&encoder_factory, "VP8",
+ /*num_temporal_layers=*/{},
+ {ScalabilityMode::kL1T2, ScalabilityMode::kL1T2});
+}
+
+TEST_F(VideoSendStreamTest, TestTemporalLayersVp9) {
+ test::FunctionVideoEncoderFactory encoder_factory(
+ []() { return VP9Encoder::Create(); });
+
+ TestTemporalLayers(&encoder_factory, "VP9",
+ /*num_temporal_layers=*/{2},
+ /*scalability_mode=*/{});
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_source_sink_controller.cc b/third_party/libwebrtc/video/video_source_sink_controller.cc
new file mode 100644
index 0000000000..2f7b37585d
--- /dev/null
+++ b/third_party/libwebrtc/video/video_source_sink_controller.cc
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_source_sink_controller.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+VideoSourceSinkController::VideoSourceSinkController(
+ rtc::VideoSinkInterface<VideoFrame>* sink,
+ rtc::VideoSourceInterface<VideoFrame>* source)
+ : sink_(sink), source_(source) {
+ RTC_DCHECK(sink_);
+}
+
+VideoSourceSinkController::~VideoSourceSinkController() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+}
+
+void VideoSourceSinkController::SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+ rtc::VideoSourceInterface<VideoFrame>* old_source = source_;
+ source_ = source;
+
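+ // Detach the sink from the previous source before attaching it to the new
+ // one with the currently stored settings.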
+ if (old_source != source && old_source)
+ old_source->RemoveSink(sink_);
+
+ if (!source)
+ return;
+
+ source->AddOrUpdateSink(sink_, CurrentSettingsToSinkWants());
+}
+
+bool VideoSourceSinkController::HasSource() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return source_ != nullptr;
+}
+
+void VideoSourceSinkController::RequestRefreshFrame() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ if (source_)
+ source_->RequestRefreshFrame();
+}
+
+void VideoSourceSinkController::PushSourceSinkSettings() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ if (!source_)
+ return;
+ rtc::VideoSinkWants wants = CurrentSettingsToSinkWants();
+ source_->AddOrUpdateSink(sink_, wants);
+}
+
+VideoSourceRestrictions VideoSourceSinkController::restrictions() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return restrictions_;
+}
+
+absl::optional<size_t> VideoSourceSinkController::pixels_per_frame_upper_limit()
+ const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return pixels_per_frame_upper_limit_;
+}
+
+absl::optional<double> VideoSourceSinkController::frame_rate_upper_limit()
+ const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return frame_rate_upper_limit_;
+}
+
+bool VideoSourceSinkController::rotation_applied() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return rotation_applied_;
+}
+
+int VideoSourceSinkController::resolution_alignment() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return resolution_alignment_;
+}
+
+const std::vector<rtc::VideoSinkWants::FrameSize>&
+VideoSourceSinkController::resolutions() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return resolutions_;
+}
+
+bool VideoSourceSinkController::active() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return active_;
+}
+
+absl::optional<rtc::VideoSinkWants::FrameSize>
+VideoSourceSinkController::requested_resolution() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return requested_resolution_;
+}
+
+void VideoSourceSinkController::SetRestrictions(
+ VideoSourceRestrictions restrictions) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ restrictions_ = std::move(restrictions);
+}
+
+void VideoSourceSinkController::SetPixelsPerFrameUpperLimit(
+ absl::optional<size_t> pixels_per_frame_upper_limit) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ pixels_per_frame_upper_limit_ = std::move(pixels_per_frame_upper_limit);
+}
+
+void VideoSourceSinkController::SetFrameRateUpperLimit(
+ absl::optional<double> frame_rate_upper_limit) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ frame_rate_upper_limit_ = std::move(frame_rate_upper_limit);
+}
+
+void VideoSourceSinkController::SetRotationApplied(bool rotation_applied) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ rotation_applied_ = rotation_applied;
+}
+
+void VideoSourceSinkController::SetResolutionAlignment(
+ int resolution_alignment) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ resolution_alignment_ = resolution_alignment;
+}
+
+void VideoSourceSinkController::SetResolutions(
+ std::vector<rtc::VideoSinkWants::FrameSize> resolutions) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ resolutions_ = std::move(resolutions);
+}
+
+void VideoSourceSinkController::SetActive(bool active) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ active_ = active;
+}
+
+void VideoSourceSinkController::SetRequestedResolution(
+ absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ requested_resolution_ = std::move(requested_resolution);
+}
+
+// RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_)
+rtc::VideoSinkWants VideoSourceSinkController::CurrentSettingsToSinkWants()
+ const {
+ rtc::VideoSinkWants wants;
+ wants.rotation_applied = rotation_applied_;
+ // `wants.black_frames` is not used; it always has its default value, false.
+ wants.max_pixel_count =
+ rtc::dchecked_cast<int>(restrictions_.max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ wants.target_pixel_count =
+ restrictions_.target_pixels_per_frame().has_value()
+ ? absl::optional<int>(rtc::dchecked_cast<int>(
+ restrictions_.target_pixels_per_frame().value()))
+ : absl::nullopt;
+ wants.max_framerate_fps =
+ restrictions_.max_frame_rate().has_value()
+ ? static_cast<int>(restrictions_.max_frame_rate().value())
+ : std::numeric_limits<int>::max();
+ wants.resolution_alignment = resolution_alignment_;
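+ // The upper limits below cap the wants independently of `restrictions_`,
+ // e.g. an encoder capped at 30 fps bounds max_framerate_fps even when no
+ // adaptation restriction is active.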
+ wants.max_pixel_count =
+ std::min(wants.max_pixel_count,
+ rtc::dchecked_cast<int>(pixels_per_frame_upper_limit_.value_or(
+ std::numeric_limits<int>::max())));
+ wants.max_framerate_fps =
+ std::min(wants.max_framerate_fps,
+ frame_rate_upper_limit_.has_value()
+ ? static_cast<int>(frame_rate_upper_limit_.value())
+ : std::numeric_limits<int>::max());
+ wants.resolutions = resolutions_;
+ wants.is_active = active_;
+ wants.requested_resolution = requested_resolution_;
+ return wants;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_source_sink_controller.h b/third_party/libwebrtc/video/video_source_sink_controller.h
new file mode 100644
index 0000000000..1bb6ef61bf
--- /dev/null
+++ b/third_party/libwebrtc/video/video_source_sink_controller.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
+#define VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+// Responsible for configuring source/sink settings, i.e. performing
+// rtc::VideoSourceInterface<VideoFrame>::AddOrUpdateSink(). It does this by
+// storing settings internally which are converted to rtc::VideoSinkWants when
+// PushSourceSinkSettings() is performed.
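+//
+// A minimal usage sketch (illustrative only):
+//   VideoSourceSinkController controller(&sink, &source);
+//   controller.SetFrameRateUpperLimit(30.0);
+//   controller.SetResolutionAlignment(2);
+//   // Both changes are applied to the sink in a single push.
+//   controller.PushSourceSinkSettings();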
+class VideoSourceSinkController {
+ public:
+ VideoSourceSinkController(rtc::VideoSinkInterface<VideoFrame>* sink,
+ rtc::VideoSourceInterface<VideoFrame>* source);
+
+ ~VideoSourceSinkController();
+
+ void SetSource(rtc::VideoSourceInterface<VideoFrame>* source);
+ bool HasSource() const;
+
+ // Requests a refresh frame from the current source, if set.
+ void RequestRefreshFrame();
+
+ // Must be called in order for changes to settings to have an effect. This
+ // allows you to modify multiple properties in a single push to the sink.
+ void PushSourceSinkSettings();
+
+ VideoSourceRestrictions restrictions() const;
+ absl::optional<size_t> pixels_per_frame_upper_limit() const;
+ absl::optional<double> frame_rate_upper_limit() const;
+ bool rotation_applied() const;
+ int resolution_alignment() const;
+ const std::vector<rtc::VideoSinkWants::FrameSize>& resolutions() const;
+ bool active() const;
+ absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution() const;
+
+ // Updates the settings stored internally. In order for these settings to be
+ // applied to the sink, PushSourceSinkSettings() must subsequently be called.
+ void SetRestrictions(VideoSourceRestrictions restrictions);
+ void SetPixelsPerFrameUpperLimit(
+ absl::optional<size_t> pixels_per_frame_upper_limit);
+ void SetFrameRateUpperLimit(absl::optional<double> frame_rate_upper_limit);
+ void SetRotationApplied(bool rotation_applied);
+ void SetResolutionAlignment(int resolution_alignment);
+ void SetResolutions(std::vector<rtc::VideoSinkWants::FrameSize> resolutions);
+ void SetActive(bool active);
+ void SetRequestedResolution(
+ absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution);
+
+ private:
+ rtc::VideoSinkWants CurrentSettingsToSinkWants() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_);
+
+ // Used to ensure that this class is called on threads/sequences that it and
+ // downstream implementations were designed for.
+ // In practice, this represents libjingle's worker thread.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+
+ rtc::VideoSinkInterface<VideoFrame>* const sink_;
+ rtc::VideoSourceInterface<VideoFrame>* source_
+ RTC_GUARDED_BY(&sequence_checker_);
+ // Pixel and frame rate restrictions.
+ VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_);
+ // Ensures that even if we are not restricted, the sink is never configured
+ // above this limit. Example: We are not CPU limited (no `restrictions_`) but
+ // our encoder is capped at 30 fps (= `frame_rate_upper_limit_`).
+ absl::optional<size_t> pixels_per_frame_upper_limit_
+ RTC_GUARDED_BY(&sequence_checker_);
+ absl::optional<double> frame_rate_upper_limit_
+ RTC_GUARDED_BY(&sequence_checker_);
+ bool rotation_applied_ RTC_GUARDED_BY(&sequence_checker_) = false;
+ int resolution_alignment_ RTC_GUARDED_BY(&sequence_checker_) = 1;
+ std::vector<rtc::VideoSinkWants::FrameSize> resolutions_
+ RTC_GUARDED_BY(&sequence_checker_);
+ bool active_ RTC_GUARDED_BY(&sequence_checker_) = true;
+ absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution_
+ RTC_GUARDED_BY(&sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
diff --git a/third_party/libwebrtc/video/video_source_sink_controller_unittest.cc b/third_party/libwebrtc/video/video_source_sink_controller_unittest.cc
new file mode 100644
index 0000000000..75cc52bdaf
--- /dev/null
+++ b/third_party/libwebrtc/video/video_source_sink_controller_unittest.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_source_sink_controller.h"
+
+#include <limits>
+
+#include "api/video/video_frame.h"
+#include "api/video/video_source_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using testing::_;
+
+namespace webrtc {
+
+namespace {
+
+using FrameSize = rtc::VideoSinkWants::FrameSize;
+constexpr int kIntUnconstrained = std::numeric_limits<int>::max();
+
+class MockVideoSinkWithVideoFrame : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ ~MockVideoSinkWithVideoFrame() override {}
+
+ MOCK_METHOD(void, OnFrame, (const VideoFrame& frame), (override));
+ MOCK_METHOD(void, OnDiscardedFrame, (), (override));
+};
+
+class MockVideoSourceWithVideoFrame
+ : public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+ ~MockVideoSourceWithVideoFrame() override {}
+
+ MOCK_METHOD(void,
+ AddOrUpdateSink,
+ (rtc::VideoSinkInterface<VideoFrame>*,
+ const rtc::VideoSinkWants&),
+ (override));
+ MOCK_METHOD(void,
+ RemoveSink,
+ (rtc::VideoSinkInterface<VideoFrame>*),
+ (override));
+ MOCK_METHOD(void, RequestRefreshFrame, (), (override));
+};
+
+} // namespace
+
+TEST(VideoSourceSinkControllerTest, UnconstrainedByDefault) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ EXPECT_EQ(controller.restrictions(), VideoSourceRestrictions());
+ EXPECT_FALSE(controller.pixels_per_frame_upper_limit().has_value());
+ EXPECT_FALSE(controller.frame_rate_upper_limit().has_value());
+ EXPECT_FALSE(controller.rotation_applied());
+ EXPECT_FALSE(controller.requested_resolution().has_value());
+ EXPECT_EQ(controller.resolution_alignment(), 1);
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_FALSE(wants.rotation_applied);
+ EXPECT_EQ(wants.max_pixel_count, kIntUnconstrained);
+ EXPECT_EQ(wants.target_pixel_count, absl::nullopt);
+ EXPECT_EQ(wants.max_framerate_fps, kIntUnconstrained);
+ EXPECT_EQ(wants.resolution_alignment, 1);
+ EXPECT_FALSE(wants.requested_resolution.has_value());
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, VideoRestrictionsToSinkWants) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+
+ VideoSourceRestrictions restrictions = controller.restrictions();
+ // max_pixels_per_frame() maps to `max_pixel_count`.
+ restrictions.set_max_pixels_per_frame(42u);
+ // target_pixels_per_frame() maps to `target_pixel_count`.
+ restrictions.set_target_pixels_per_frame(200u);
+ // max_frame_rate() maps to `max_framerate_fps`.
+ restrictions.set_max_frame_rate(30.0);
+ controller.SetRestrictions(restrictions);
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(wants.max_pixel_count, 42);
+ EXPECT_EQ(wants.target_pixel_count, 200);
+ EXPECT_EQ(wants.max_framerate_fps, 30);
+ });
+ controller.PushSourceSinkSettings();
+
+ // pixels_per_frame_upper_limit() caps `max_pixel_count`.
+ controller.SetPixelsPerFrameUpperLimit(24);
+ // frame_rate_upper_limit() caps `max_framerate_fps`.
+ controller.SetFrameRateUpperLimit(10.0);
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(wants.max_pixel_count, 24);
+ EXPECT_EQ(wants.max_framerate_fps, 10);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, RotationApplied) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ controller.SetRotationApplied(true);
+ EXPECT_TRUE(controller.rotation_applied());
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_TRUE(wants.rotation_applied);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, ResolutionAlignment) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ controller.SetResolutionAlignment(13);
+ EXPECT_EQ(controller.resolution_alignment(), 13);
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(wants.resolution_alignment, 13);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest,
+ PushSourceSinkSettingsWithoutSourceDoesNotCrash) {
+ MockVideoSinkWithVideoFrame sink;
+ VideoSourceSinkController controller(&sink, nullptr);
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, RequestsRefreshFrameWithSource) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ EXPECT_CALL(source, RequestRefreshFrame);
+ controller.RequestRefreshFrame();
+}
+
+TEST(VideoSourceSinkControllerTest,
+ RequestsRefreshFrameWithoutSourceDoesNotCrash) {
+ MockVideoSinkWithVideoFrame sink;
+ VideoSourceSinkController controller(&sink, nullptr);
+ controller.RequestRefreshFrame();
+}
+
+TEST(VideoSourceSinkControllerTest, RequestedResolutionPropagatesToWants) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ controller.SetRequestedResolution(FrameSize(640, 360));
+ EXPECT_TRUE(controller.requested_resolution().has_value());
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(*wants.requested_resolution, FrameSize(640, 360));
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, ActivePropagatesToWants) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ controller.SetActive(true);
+ EXPECT_TRUE(controller.active());
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_TRUE(wants.is_active);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_buffer_controller.cc b/third_party/libwebrtc/video/video_stream_buffer_controller.cc
new file mode 100644
index 0000000000..37724a8338
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_buffer_controller.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_buffer_controller.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/functional/bind_front.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/data_size.h"
+#include "api/video/encoded_frame.h"
+#include "api/video/frame_buffer.h"
+#include "api/video/video_content_type.h"
+#include "modules/video_coding/frame_helpers.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/trace_event.h"
+#include "video/frame_decode_scheduler.h"
+#include "video/frame_decode_timing.h"
+#include "video/task_queue_frame_decode_scheduler.h"
+#include "video/video_receive_stream_timeout_tracker.h"
+
+namespace webrtc {
+
+namespace {
+
+// Max number of frames the buffer will hold.
+static constexpr size_t kMaxFramesBuffered = 800;
+// Max number of decoded-frame info entries that will be saved.
+static constexpr int kMaxFramesHistory = 1 << 13;
+
+// Default value for the maximum decode queue size that is used when the
+// low-latency renderer is used.
+static constexpr size_t kZeroPlayoutDelayDefaultMaxDecodeQueueSize = 8;
+
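+// Caches the frame fields that are still needed after the frame itself has
+// been moved into the FrameBuffer (see InsertFrame below).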
+struct FrameMetadata {
+ explicit FrameMetadata(const EncodedFrame& frame)
+ : is_last_spatial_layer(frame.is_last_spatial_layer),
+ is_keyframe(frame.is_keyframe()),
+ size(frame.size()),
+ contentType(frame.contentType()),
+ delayed_by_retransmission(frame.delayed_by_retransmission()),
+ rtp_timestamp(frame.Timestamp()),
+ receive_time(frame.ReceivedTimestamp()) {}
+
+ const bool is_last_spatial_layer;
+ const bool is_keyframe;
+ const size_t size;
+ const VideoContentType contentType;
+ const bool delayed_by_retransmission;
+ const uint32_t rtp_timestamp;
+ const absl::optional<Timestamp> receive_time;
+};
+
+Timestamp ReceiveTime(const EncodedFrame& frame) {
+ absl::optional<Timestamp> ts = frame.ReceivedTimestamp();
+ RTC_DCHECK(ts.has_value()) << "Received frame must have a timestamp set!";
+ return *ts;
+}
+
+} // namespace
+
+VideoStreamBufferController::VideoStreamBufferController(
+ Clock* clock,
+ TaskQueueBase* worker_queue,
+ VCMTiming* timing,
+ VCMReceiveStatisticsCallback* stats_proxy,
+ FrameSchedulingReceiver* receiver,
+ TimeDelta max_wait_for_keyframe,
+ TimeDelta max_wait_for_frame,
+ std::unique_ptr<FrameDecodeScheduler> frame_decode_scheduler,
+ const FieldTrialsView& field_trials)
+ : field_trials_(field_trials),
+ clock_(clock),
+ stats_proxy_(stats_proxy),
+ receiver_(receiver),
+ timing_(timing),
+ frame_decode_scheduler_(std::move(frame_decode_scheduler)),
+ jitter_estimator_(clock_, field_trials),
+ buffer_(std::make_unique<FrameBuffer>(kMaxFramesBuffered,
+ kMaxFramesHistory,
+ field_trials)),
+ decode_timing_(clock_, timing_),
+ timeout_tracker_(
+ clock_,
+ worker_queue,
+ VideoReceiveStreamTimeoutTracker::Timeouts{
+ .max_wait_for_keyframe = max_wait_for_keyframe,
+ .max_wait_for_frame = max_wait_for_frame},
+ absl::bind_front(&VideoStreamBufferController::OnTimeout, this)),
+ zero_playout_delay_max_decode_queue_size_(
+ "max_decode_queue_size",
+ kZeroPlayoutDelayDefaultMaxDecodeQueueSize) {
+ RTC_DCHECK(stats_proxy_);
+ RTC_DCHECK(receiver_);
+ RTC_DCHECK(timing_);
+ RTC_DCHECK(clock_);
+ RTC_DCHECK(frame_decode_scheduler_);
+
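+ // A hedged example of the expected trial string, assuming the standard
+ // field-trial parameter syntax:
+ // "WebRTC-ZeroPlayoutDelay/max_decode_queue_size:16/".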
+ ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_},
+ field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
+}
+
+void VideoStreamBufferController::Stop() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ frame_decode_scheduler_->Stop();
+ timeout_tracker_.Stop();
+ decoder_ready_for_new_frame_ = false;
+}
+
+void VideoStreamBufferController::SetProtectionMode(
+ VCMVideoProtection protection_mode) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ protection_mode_ = protection_mode;
+}
+
+void VideoStreamBufferController::Clear() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ stats_proxy_->OnDroppedFrames(buffer_->CurrentSize());
+ buffer_ = std::make_unique<FrameBuffer>(kMaxFramesBuffered, kMaxFramesHistory,
+ field_trials_);
+ frame_decode_scheduler_->CancelOutstanding();
+}
+
+absl::optional<int64_t> VideoStreamBufferController::InsertFrame(
+ std::unique_ptr<EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ FrameMetadata metadata(*frame);
+ const uint32_t ssrc =
+ frame->PacketInfos().empty() ? 0 : frame->PacketInfos()[0].ssrc();
+ const int64_t frameId = frame->Id();
+ int complete_units = buffer_->GetTotalNumberOfContinuousTemporalUnits();
+ if (buffer_->InsertFrame(std::move(frame))) {
+ RTC_DCHECK(metadata.receive_time) << "Frame receive time must be set!";
+ if (!metadata.delayed_by_retransmission && metadata.receive_time &&
+ (field_trials_.IsDisabled("WebRTC-IncomingTimestampOnMarkerBitOnly") ||
+ metadata.is_last_spatial_layer)) {
+ timing_->IncomingTimestamp(metadata.rtp_timestamp,
+ *metadata.receive_time);
+ }
+ if (complete_units < buffer_->GetTotalNumberOfContinuousTemporalUnits()) {
+ TRACE_EVENT2("webrtc",
+ "VideoStreamBufferController::InsertFrame Frame Complete",
+ "remote_ssrc", ssrc, "frame_id", frameId);
+ stats_proxy_->OnCompleteFrame(metadata.is_keyframe, metadata.size,
+ metadata.contentType);
+ MaybeScheduleFrameForRelease();
+ }
+ }
+
+ return buffer_->LastContinuousFrameId();
+}
+
+void VideoStreamBufferController::UpdateRtt(int64_t max_rtt_ms) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ jitter_estimator_.UpdateRtt(TimeDelta::Millis(max_rtt_ms));
+}
+
+void VideoStreamBufferController::SetMaxWaits(TimeDelta max_wait_for_keyframe,
+ TimeDelta max_wait_for_frame) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ timeout_tracker_.SetTimeouts({.max_wait_for_keyframe = max_wait_for_keyframe,
+ .max_wait_for_frame = max_wait_for_frame});
+}
+
+void VideoStreamBufferController::StartNextDecode(bool keyframe_required) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ if (!timeout_tracker_.Running())
+ timeout_tracker_.Start(keyframe_required);
+ keyframe_required_ = keyframe_required;
+ if (keyframe_required_) {
+ timeout_tracker_.SetWaitingForKeyframe();
+ }
+ decoder_ready_for_new_frame_ = true;
+ MaybeScheduleFrameForRelease();
+}
+
+int VideoStreamBufferController::Size() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ return buffer_->CurrentSize();
+}
+
+void VideoStreamBufferController::OnFrameReady(
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames,
+ Timestamp render_time) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ RTC_CHECK(!frames.empty())
+ << "Callers must ensure there is at least one frame to decode.";
+
+ timeout_tracker_.OnEncodedFrameReleased();
+
+ Timestamp now = clock_->CurrentTime();
+ bool superframe_delayed_by_retransmission = false;
+ DataSize superframe_size = DataSize::Zero();
+ const EncodedFrame& first_frame = *frames.front();
+ Timestamp receive_time = ReceiveTime(first_frame);
+
+ if (first_frame.is_keyframe())
+ keyframe_required_ = false;
+
+ // Gracefully handle bad RTP timestamps and render time issues.
+ if (FrameHasBadRenderTiming(render_time, now) ||
+ TargetVideoDelayIsTooLarge(timing_->TargetVideoDelay())) {
+ RTC_LOG(LS_WARNING) << "Resetting jitter estimator and timing module due "
+ "to bad render timing for rtp_timestamp="
+ << first_frame.Timestamp();
+ jitter_estimator_.Reset();
+ timing_->Reset();
+ render_time = timing_->RenderTime(first_frame.Timestamp(), now);
+ }
+
+ for (std::unique_ptr<EncodedFrame>& frame : frames) {
+ frame->SetRenderTime(render_time.ms());
+
+ superframe_delayed_by_retransmission |= frame->delayed_by_retransmission();
+ receive_time = std::max(receive_time, ReceiveTime(*frame));
+ superframe_size += DataSize::Bytes(frame->size());
+ }
+
+ if (!superframe_delayed_by_retransmission) {
+ auto frame_delay = inter_frame_delay_.CalculateDelay(
+ first_frame.Timestamp(), receive_time);
+ if (frame_delay) {
+ jitter_estimator_.UpdateEstimate(*frame_delay, superframe_size);
+ }
+
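+ // Under NACK+FEC protection the RTT-based term is excluded from the
+ // jitter estimate (rtt_mult = 0), unless overridden by the rtt-mult
+ // experiment settings below.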
+ float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
+ absl::optional<TimeDelta> rtt_mult_add_cap_ms = absl::nullopt;
+ if (rtt_mult_settings_.has_value()) {
+ rtt_mult = rtt_mult_settings_->rtt_mult_setting;
+ rtt_mult_add_cap_ms =
+ TimeDelta::Millis(rtt_mult_settings_->rtt_mult_add_cap_ms);
+ }
+ timing_->SetJitterDelay(
+ jitter_estimator_.GetJitterEstimate(rtt_mult, rtt_mult_add_cap_ms));
+ timing_->UpdateCurrentDelay(render_time, now);
+ } else if (RttMultExperiment::RttMultEnabled()) {
+ jitter_estimator_.FrameNacked();
+ }
+
+ // Update stats.
+ UpdateDroppedFrames();
+ UpdateDiscardedPackets();
+ UpdateJitterDelay();
+ UpdateTimingFrameInfo();
+
+ std::unique_ptr<EncodedFrame> frame =
+ CombineAndDeleteFrames(std::move(frames));
+
+ timing_->SetLastDecodeScheduledTimestamp(now);
+
+ decoder_ready_for_new_frame_ = false;
+ receiver_->OnEncodedFrame(std::move(frame));
+}
+
+void VideoStreamBufferController::OnTimeout(TimeDelta delay) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+
+ // Stop sending timeouts until receiver starts waiting for a new frame.
+ timeout_tracker_.Stop();
+
+ // If the stream is paused then ignore the timeout.
+ if (!decoder_ready_for_new_frame_) {
+ return;
+ }
+ decoder_ready_for_new_frame_ = false;
+ receiver_->OnDecodableFrameTimeout(delay);
+}
+
+void VideoStreamBufferController::FrameReadyForDecode(uint32_t rtp_timestamp,
+ Timestamp render_time) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ // Check that the frame to decode is still valid before passing the frame for
+ // decoding.
+ auto decodable_tu_info = buffer_->DecodableTemporalUnitsInfo();
+ if (!decodable_tu_info) {
+ RTC_LOG(LS_ERROR)
+ << "The frame buffer became undecodable during the wait "
+ "to decode frame with rtp-timestamp "
+ << rtp_timestamp
+ << ". Cancelling the decode of this frame, decoding "
+ "will resume when the frame buffers become decodable again.";
+ return;
+ }
+ RTC_DCHECK_EQ(rtp_timestamp, decodable_tu_info->next_rtp_timestamp)
+ << "Frame buffer's next decodable frame was not the one sent for "
+ "extraction.";
+ auto frames = buffer_->ExtractNextDecodableTemporalUnit();
+ if (frames.empty()) {
+ RTC_LOG(LS_ERROR)
+ << "The frame buffer should never return an empty temporal until list "
+ "when there is a decodable temporal unit.";
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+ OnFrameReady(std::move(frames), render_time);
+}
+
+void VideoStreamBufferController::UpdateDroppedFrames()
+ RTC_RUN_ON(&worker_sequence_checker_) {
+ const int dropped_frames = buffer_->GetTotalNumberOfDroppedFrames() -
+ frames_dropped_before_last_new_frame_;
+ if (dropped_frames > 0)
+ stats_proxy_->OnDroppedFrames(dropped_frames);
+ frames_dropped_before_last_new_frame_ =
+ buffer_->GetTotalNumberOfDroppedFrames();
+}
+
+void VideoStreamBufferController::UpdateDiscardedPackets()
+ RTC_RUN_ON(&worker_sequence_checker_) {
+ const int discarded_packets = buffer_->GetTotalNumberOfDiscardedPackets() -
+ packets_discarded_before_last_new_frame_;
+ if (discarded_packets > 0) {
+ stats_proxy_->OnDiscardedPackets(discarded_packets);
+ }
+ packets_discarded_before_last_new_frame_ =
+ buffer_->GetTotalNumberOfDiscardedPackets();
+}
+
+void VideoStreamBufferController::UpdateJitterDelay() {
+ auto timings = timing_->GetTimings();
+ if (timings.num_decoded_frames) {
+ stats_proxy_->OnFrameBufferTimingsUpdated(
+ timings.max_decode_duration.ms(), timings.current_delay.ms(),
+ timings.target_delay.ms(), timings.jitter_buffer_delay.ms(),
+ timings.min_playout_delay.ms(), timings.render_delay.ms());
+ }
+}
+
+void VideoStreamBufferController::UpdateTimingFrameInfo() {
+ absl::optional<TimingFrameInfo> info = timing_->GetTimingFrameInfo();
+ if (info)
+ stats_proxy_->OnTimingFrameInfoUpdated(*info);
+}
+
+bool VideoStreamBufferController::IsTooManyFramesQueued() const
+ RTC_RUN_ON(&worker_sequence_checker_) {
+ return buffer_->CurrentSize() > zero_playout_delay_max_decode_queue_size_;
+}
+
+void VideoStreamBufferController::ForceKeyFrameReleaseImmediately()
+ RTC_RUN_ON(&worker_sequence_checker_) {
+ RTC_DCHECK(keyframe_required_);
+  // Iterate through the frame buffer until a complete keyframe is found, and
+  // release it right away.
+ while (buffer_->DecodableTemporalUnitsInfo()) {
+ auto next_frame = buffer_->ExtractNextDecodableTemporalUnit();
+ if (next_frame.empty()) {
+ RTC_DCHECK_NOTREACHED()
+ << "Frame buffer should always return at least 1 frame.";
+ continue;
+ }
+ // Found keyframe - decode right away.
+ if (next_frame.front()->is_keyframe()) {
+ auto render_time = timing_->RenderTime(next_frame.front()->Timestamp(),
+ clock_->CurrentTime());
+ OnFrameReady(std::move(next_frame), render_time);
+ return;
+ }
+ }
+}
+
+void VideoStreamBufferController::MaybeScheduleFrameForRelease()
+ RTC_RUN_ON(&worker_sequence_checker_) {
+ auto decodable_tu_info = buffer_->DecodableTemporalUnitsInfo();
+ if (!decoder_ready_for_new_frame_ || !decodable_tu_info) {
+ return;
+ }
+
+ if (keyframe_required_) {
+ return ForceKeyFrameReleaseImmediately();
+ }
+
+ // If already scheduled then abort.
+ if (frame_decode_scheduler_->ScheduledRtpTimestamp() ==
+ decodable_tu_info->next_rtp_timestamp) {
+ return;
+ }
+
+ TimeDelta max_wait = timeout_tracker_.TimeUntilTimeout();
+  // Ensure the frame is scheduled for decode before the stream times out;
+  // otherwise the scheduled decode and the timeout race each other.
+ max_wait = std::max(max_wait - TimeDelta::Millis(1), TimeDelta::Zero());
+ absl::optional<FrameDecodeTiming::FrameSchedule> schedule;
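+  // Walk the decodable temporal units in order: schedule the first one with a
+  // feasible decode time, and drop units that can no longer be scheduled.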
+ while (decodable_tu_info) {
+ schedule = decode_timing_.OnFrameBufferUpdated(
+ decodable_tu_info->next_rtp_timestamp,
+ decodable_tu_info->last_rtp_timestamp, max_wait,
+ IsTooManyFramesQueued());
+ if (schedule) {
+ // Don't schedule if already waiting for the same frame.
+ if (frame_decode_scheduler_->ScheduledRtpTimestamp() !=
+ decodable_tu_info->next_rtp_timestamp) {
+ frame_decode_scheduler_->CancelOutstanding();
+ frame_decode_scheduler_->ScheduleFrame(
+ decodable_tu_info->next_rtp_timestamp, *schedule,
+ absl::bind_front(&VideoStreamBufferController::FrameReadyForDecode,
+ this));
+ }
+ return;
+ }
+ // If no schedule for current rtp, drop and try again.
+ buffer_->DropNextDecodableTemporalUnit();
+ decodable_tu_info = buffer_->DecodableTemporalUnitsInfo();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_buffer_controller.h b/third_party/libwebrtc/video/video_stream_buffer_controller.h
new file mode 100644
index 0000000000..7638c91471
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_buffer_controller.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_BUFFER_CONTROLLER_H_
+#define VIDEO_VIDEO_STREAM_BUFFER_CONTROLLER_H_
+
+#include <memory>
+
+#include "api/field_trials_view.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/encoded_frame.h"
+#include "api/video/frame_buffer.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/timing/inter_frame_delay.h"
+#include "modules/video_coding/timing/jitter_estimator.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/experiments/rtt_mult_experiment.h"
+#include "system_wrappers/include/clock.h"
+#include "video/decode_synchronizer.h"
+#include "video/video_receive_stream_timeout_tracker.h"
+
+namespace webrtc {
+
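+// Callback interface towards the decoder: the buffer controller delivers the
+// next decodable frame through OnEncodedFrame(), or reports via
+// OnDecodableFrameTimeout() that no decodable frame arrived within the
+// configured wait time.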
+class FrameSchedulingReceiver {
+ public:
+ virtual ~FrameSchedulingReceiver() = default;
+
+ virtual void OnEncodedFrame(std::unique_ptr<EncodedFrame> frame) = 0;
+ virtual void OnDecodableFrameTimeout(TimeDelta wait_time) = 0;
+};
+
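+// Owns the receive-side FrameBuffer and decides when the next temporal unit
+// is handed to the decoder, updating jitter, timing, and receive statistics
+// along the way. All methods must be called on the worker sequence.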
+class VideoStreamBufferController {
+ public:
+ VideoStreamBufferController(
+ Clock* clock,
+ TaskQueueBase* worker_queue,
+ VCMTiming* timing,
+ VCMReceiveStatisticsCallback* stats_proxy,
+ FrameSchedulingReceiver* receiver,
+ TimeDelta max_wait_for_keyframe,
+ TimeDelta max_wait_for_frame,
+ std::unique_ptr<FrameDecodeScheduler> frame_decode_scheduler,
+ const FieldTrialsView& field_trials);
+ virtual ~VideoStreamBufferController() = default;
+
+ void Stop();
+ void SetProtectionMode(VCMVideoProtection protection_mode);
+ void Clear();
+ absl::optional<int64_t> InsertFrame(std::unique_ptr<EncodedFrame> frame);
+ void UpdateRtt(int64_t max_rtt_ms);
+ void SetMaxWaits(TimeDelta max_wait_for_keyframe,
+ TimeDelta max_wait_for_frame);
+ void StartNextDecode(bool keyframe_required);
+ int Size();
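+
+  // Illustrative call flow (a sketch, not normative documentation):
+  //   controller->StartNextDecode(/*keyframe_required=*/true);
+  //   controller->InsertFrame(std::move(keyframe));
+  //   // FrameSchedulingReceiver::OnEncodedFrame() fires when the keyframe is
+  //   // released; OnDecodableFrameTimeout() fires after max_wait_for_keyframe.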
+
+ private:
+ void OnFrameReady(
+ absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames,
+ Timestamp render_time);
+ void OnTimeout(TimeDelta delay);
+ void FrameReadyForDecode(uint32_t rtp_timestamp, Timestamp render_time);
+ void UpdateDroppedFrames() RTC_RUN_ON(&worker_sequence_checker_);
+ void UpdateDiscardedPackets() RTC_RUN_ON(&worker_sequence_checker_);
+ void UpdateJitterDelay();
+ void UpdateTimingFrameInfo();
+ bool IsTooManyFramesQueued() const RTC_RUN_ON(&worker_sequence_checker_);
+ void ForceKeyFrameReleaseImmediately() RTC_RUN_ON(&worker_sequence_checker_);
+ void MaybeScheduleFrameForRelease() RTC_RUN_ON(&worker_sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_;
+ const FieldTrialsView& field_trials_;
+ const absl::optional<RttMultExperiment::Settings> rtt_mult_settings_ =
+ RttMultExperiment::GetRttMultValue();
+ Clock* const clock_;
+ VCMReceiveStatisticsCallback* const stats_proxy_;
+ FrameSchedulingReceiver* const receiver_;
+ VCMTiming* const timing_;
+ const std::unique_ptr<FrameDecodeScheduler> frame_decode_scheduler_
+ RTC_GUARDED_BY(&worker_sequence_checker_);
+
+ JitterEstimator jitter_estimator_ RTC_GUARDED_BY(&worker_sequence_checker_);
+ InterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(&worker_sequence_checker_);
+ bool keyframe_required_ RTC_GUARDED_BY(&worker_sequence_checker_) = false;
+ std::unique_ptr<FrameBuffer> buffer_
+ RTC_GUARDED_BY(&worker_sequence_checker_);
+ FrameDecodeTiming decode_timing_ RTC_GUARDED_BY(&worker_sequence_checker_);
+ VideoReceiveStreamTimeoutTracker timeout_tracker_
+ RTC_GUARDED_BY(&worker_sequence_checker_);
+ int frames_dropped_before_last_new_frame_
+ RTC_GUARDED_BY(&worker_sequence_checker_) = 0;
+ int packets_discarded_before_last_new_frame_
+ RTC_GUARDED_BY(&worker_sequence_checker_) = 0;
+ VCMVideoProtection protection_mode_
+ RTC_GUARDED_BY(&worker_sequence_checker_) = kProtectionNack;
+
+  // This flag prevents frames from queuing up in front of the decoder.
+  // Without it, encoded frames would be delivered without waiting for the
+  // decoder to finish the previous frame, so frames would never be dropped
+  // or fast-forwarded when the decoder is slow or hangs.
+ bool decoder_ready_for_new_frame_ RTC_GUARDED_BY(&worker_sequence_checker_) =
+ false;
+
+ // Maximum number of frames in the decode queue to allow pacing. If the
+ // queue grows beyond the max limit, pacing will be disabled and frames will
+ // be pushed to the decoder as soon as possible. This only has an effect
+ // when the low-latency rendering path is active, which is indicated by
+ // the frame's render time == 0.
+ FieldTrialParameter<unsigned> zero_playout_delay_max_decode_queue_size_;
+
+ ScopedTaskSafety worker_safety_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_BUFFER_CONTROLLER_H_
diff --git a/third_party/libwebrtc/video/video_stream_buffer_controller_gn/moz.build b/third_party/libwebrtc/video/video_stream_buffer_controller_gn/moz.build
new file mode 100644
index 0000000000..a18b990a8d
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_buffer_controller_gn/moz.build
@@ -0,0 +1,233 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/video_stream_buffer_controller.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_stream_buffer_controller_gn")
diff --git a/third_party/libwebrtc/video/video_stream_buffer_controller_unittest.cc b/third_party/libwebrtc/video/video_stream_buffer_controller_unittest.cc
new file mode 100644
index 0000000000..e7235a2ff1
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_buffer_controller_unittest.cc
@@ -0,0 +1,922 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_buffer_controller.h"
+
+#include <stdint.h>
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/metronome/test/fake_metronome.h"
+#include "api/units/frequency.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_timing.h"
+#include "rtc_base/checks.h"
+#include "test/fake_encoded_frame.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "video/decode_synchronizer.h"
+#include "video/task_queue_frame_decode_scheduler.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::Matches;
+using ::testing::Ne;
+using ::testing::Not;
+using ::testing::Optional;
+using ::testing::Pointee;
+using ::testing::SizeIs;
+using ::testing::VariantWith;
+
+namespace webrtc {
+
+namespace {
+
+constexpr size_t kFrameSize = 10;
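+// 30 fps on the 90 kHz RTP clock: 90000 / 30 = 3000 RTP ticks (~33.3 ms) per
+// frame.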
+constexpr uint32_t kFps30Rtp = 90000 / 30;
+constexpr TimeDelta kFps30Delay = 1 / Frequency::Hertz(30);
+const VideoPlayoutDelay kZeroPlayoutDelay = {0, 0};
+constexpr Timestamp kClockStart = Timestamp::Millis(1000);
+
+auto TimedOut() {
+ return Optional(VariantWith<TimeDelta>(_));
+}
+
+auto Frame(testing::Matcher<EncodedFrame> m) {
+ return Optional(VariantWith<std::unique_ptr<EncodedFrame>>(Pointee(m)));
+}
+
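+// Derives a receive time from the frame's RTP timestamp, so that e.g. an RTP
+// timestamp of 90000 maps to a receive time of kClockStart + 1 second.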
+std::unique_ptr<test::FakeEncodedFrame> WithReceiveTimeFromRtpTimestamp(
+ std::unique_ptr<test::FakeEncodedFrame> frame) {
+ if (frame->Timestamp() == 0) {
+ frame->SetReceivedTime(kClockStart.ms());
+ } else {
+ frame->SetReceivedTime(
+ TimeDelta::Seconds(frame->Timestamp() / 90000.0).ms() +
+ kClockStart.ms());
+ }
+ return frame;
+}
+
+class VCMTimingTest : public VCMTiming {
+ public:
+ using VCMTiming::VCMTiming;
+ void IncomingTimestamp(uint32_t rtp_timestamp,
+ Timestamp last_packet_time) override {
+ IncomingTimestampMocked(rtp_timestamp, last_packet_time);
+ VCMTiming::IncomingTimestamp(rtp_timestamp, last_packet_time);
+ }
+
+ MOCK_METHOD(void,
+ IncomingTimestampMocked,
+ (uint32_t rtp_timestamp, Timestamp last_packet_time),
+ ());
+};
+
+class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback {
+ public:
+ MOCK_METHOD(void,
+ OnCompleteFrame,
+ (bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type),
+ (override));
+ MOCK_METHOD(void, OnDroppedFrames, (uint32_t num_dropped), (override));
+ MOCK_METHOD(void,
+ OnFrameBufferTimingsUpdated,
+ (int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms),
+ (override));
+ MOCK_METHOD(void,
+ OnTimingFrameInfoUpdated,
+ (const TimingFrameInfo& info),
+ (override));
+};
+
+} // namespace
+
+constexpr auto kMaxWaitForKeyframe = TimeDelta::Millis(500);
+constexpr auto kMaxWaitForFrame = TimeDelta::Millis(1500);
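+
+// The fixture is parameterized on (sync decoding via the metronome?, field
+// trial string); see the INSTANTIATE_TEST_SUITE_P calls near the bottom of
+// the file.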
+class VideoStreamBufferControllerFixture
+ : public ::testing::WithParamInterface<std::tuple<bool, std::string>>,
+ public FrameSchedulingReceiver {
+ public:
+ VideoStreamBufferControllerFixture()
+ : sync_decoding_(std::get<0>(GetParam())),
+ field_trials_(std::get<1>(GetParam())),
+ time_controller_(kClockStart),
+ clock_(time_controller_.GetClock()),
+ fake_metronome_(TimeDelta::Millis(16)),
+ decode_sync_(clock_,
+ &fake_metronome_,
+ time_controller_.GetMainThread()),
+ timing_(clock_, field_trials_),
+ buffer_(std::make_unique<VideoStreamBufferController>(
+ clock_,
+ time_controller_.GetMainThread(),
+ &timing_,
+ &stats_callback_,
+ this,
+ kMaxWaitForKeyframe,
+ kMaxWaitForFrame,
+ sync_decoding_ ? decode_sync_.CreateSynchronizedFrameScheduler()
+ : std::make_unique<TaskQueueFrameDecodeScheduler>(
+ clock_,
+ time_controller_.GetMainThread()),
+ field_trials_)) {
+ // Avoid starting with negative render times.
+ timing_.set_min_playout_delay(TimeDelta::Millis(10));
+
+ ON_CALL(stats_callback_, OnDroppedFrames)
+ .WillByDefault(
+ [this](auto num_dropped) { dropped_frames_ += num_dropped; });
+ }
+
+ ~VideoStreamBufferControllerFixture() override {
+ if (buffer_) {
+ buffer_->Stop();
+ }
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ }
+
+ void OnEncodedFrame(std::unique_ptr<EncodedFrame> frame) override {
+ RTC_DCHECK(frame);
+ SetWaitResult(std::move(frame));
+ }
+
+ void OnDecodableFrameTimeout(TimeDelta wait_time) override {
+ SetWaitResult(wait_time);
+ }
+
+ using WaitResult =
+ absl::variant<std::unique_ptr<EncodedFrame>, TimeDelta /*wait_time*/>;
+
+ absl::optional<WaitResult> WaitForFrameOrTimeout(TimeDelta wait) {
+ if (wait_result_) {
+ return std::move(wait_result_);
+ }
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ if (wait_result_) {
+ return std::move(wait_result_);
+ }
+
+ Timestamp now = clock_->CurrentTime();
+    // TODO(bugs.webrtc.org/13756): Remove this when rtc::Thread uses
+    // Timestamp instead of integer milliseconds. This extra wait is needed
+    // for some tests that use the metronome: rounding to milliseconds affects
+    // the precision with which the simulated time controller posts tasks
+    // from threads.
+ TimeDelta potential_extra_wait =
+ Timestamp::Millis((now + wait).ms()) - (now + wait);
+
+ time_controller_.AdvanceTime(wait);
+ if (potential_extra_wait > TimeDelta::Zero()) {
+ time_controller_.AdvanceTime(potential_extra_wait);
+ }
+ return std::move(wait_result_);
+ }
+
+ void StartNextDecode() {
+ ResetLastResult();
+ buffer_->StartNextDecode(false);
+ }
+
+ void StartNextDecodeForceKeyframe() {
+ ResetLastResult();
+ buffer_->StartNextDecode(true);
+ }
+
+ void ResetLastResult() { wait_result_.reset(); }
+
+ int dropped_frames() const { return dropped_frames_; }
+
+ protected:
+ const bool sync_decoding_;
+ test::ScopedKeyValueConfig field_trials_;
+ GlobalSimulatedTimeController time_controller_;
+ Clock* const clock_;
+ test::FakeMetronome fake_metronome_;
+ DecodeSynchronizer decode_sync_;
+
+ ::testing::NiceMock<VCMTimingTest> timing_;
+ ::testing::NiceMock<VCMReceiveStatisticsCallbackMock> stats_callback_;
+ std::unique_ptr<VideoStreamBufferController> buffer_;
+
+ private:
+ void SetWaitResult(WaitResult result) {
+ RTC_DCHECK(!wait_result_);
+ if (absl::holds_alternative<std::unique_ptr<EncodedFrame>>(result)) {
+ RTC_DCHECK(absl::get<std::unique_ptr<EncodedFrame>>(result));
+ }
+ wait_result_.emplace(std::move(result));
+ }
+
+ uint32_t dropped_frames_ = 0;
+ absl::optional<WaitResult> wait_result_;
+};
+
+class VideoStreamBufferControllerTest
+ : public ::testing::Test,
+ public VideoStreamBufferControllerFixture {};
+
+TEST_P(VideoStreamBufferControllerTest,
+ InitialTimeoutAfterKeyframeTimeoutPeriod) {
+ StartNextDecodeForceKeyframe();
+ // No frame inserted. Timeout expected.
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForKeyframe), TimedOut());
+
+ // No new timeout set since receiver has not started new decode.
+ ResetLastResult();
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForKeyframe), Eq(absl::nullopt));
+
+ // Now that receiver has asked for new frame, a new timeout can occur.
+ StartNextDecodeForceKeyframe();
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForKeyframe), TimedOut());
+}
+
+TEST_P(VideoStreamBufferControllerTest, KeyFramesAreScheduled) {
+ StartNextDecodeForceKeyframe();
+ time_controller_.AdvanceTime(TimeDelta::Millis(50));
+
+ auto frame = test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build();
+ buffer_->InsertFrame(std::move(frame));
+
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ DeltaFrameTimeoutAfterKeyframeExtracted) {
+ StartNextDecodeForceKeyframe();
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(50));
+ auto frame = test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build();
+ buffer_->InsertFrame(std::move(frame));
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForKeyframe),
+ Frame(test::WithId(0)));
+
+ StartNextDecode();
+ time_controller_.AdvanceTime(TimeDelta::Millis(50));
+
+ // Timeouts should now happen at the normal frequency.
+ const int expected_timeouts = 5;
+ for (int i = 0; i < expected_timeouts; ++i) {
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame), TimedOut());
+ StartNextDecode();
+ }
+}
+
+TEST_P(VideoStreamBufferControllerTest, DependantFramesAreScheduled) {
+ StartNextDecodeForceKeyframe();
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ StartNextDecode();
+
+ time_controller_.AdvanceTime(kFps30Delay);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(1)));
+}
+
+TEST_P(VideoStreamBufferControllerTest, SpatialLayersAreScheduled) {
+ StartNextDecodeForceKeyframe();
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(0).SpatialLayer(0).Time(0).Build()));
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(1).SpatialLayer(1).Time(0).Build()));
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(2).SpatialLayer(2).Time(0).AsLast().Build()));
+ EXPECT_THAT(
+ WaitForFrameOrTimeout(TimeDelta::Zero()),
+ Frame(AllOf(test::WithId(0), test::FrameWithSize(3 * kFrameSize))));
+
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(3).Time(kFps30Rtp).SpatialLayer(0).Build()));
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(4).Time(kFps30Rtp).SpatialLayer(1).Build()));
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(5)
+ .Time(kFps30Rtp)
+ .SpatialLayer(2)
+ .AsLast()
+ .Build()));
+
+ StartNextDecode();
+ EXPECT_THAT(
+ WaitForFrameOrTimeout(kFps30Delay * 10),
+ Frame(AllOf(test::WithId(3), test::FrameWithSize(3 * kFrameSize))));
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ OutstandingFrameTasksAreCancelledAfterDeletion) {
+ StartNextDecodeForceKeyframe();
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build()));
+ // Get keyframe. Delta frame should now be scheduled.
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ StartNextDecode();
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build()));
+ buffer_->Stop();
+ // Wait for 2x max wait time. Since we stopped, this should cause no timeouts
+ // or frame-ready callbacks.
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame * 2), Eq(absl::nullopt));
+}
+
+TEST_P(VideoStreamBufferControllerTest, FramesWaitForDecoderToComplete) {
+ StartNextDecodeForceKeyframe();
+
+ // Start with a keyframe.
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ ResetLastResult();
+ // Insert a delta frame.
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+
+ // Advancing time should not result in a frame since the scheduler has not
+ // been signalled that we are ready.
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Eq(absl::nullopt));
+ // Signal ready.
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(1)));
+}
+
+TEST_P(VideoStreamBufferControllerTest, LateFrameDropped) {
+ StartNextDecodeForceKeyframe();
+ // F1
+ // /
+ // F0 --> F2
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ // Start with a keyframe.
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ StartNextDecode();
+
+ // Simulate late F1 which arrives after F2.
+ time_controller_.AdvanceTime(kFps30Delay * 2);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(2)
+ .Time(2 * kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(2)));
+
+ StartNextDecode();
+
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(1 * kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+ // Confirm frame 1 is never scheduled by timing out.
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame), TimedOut());
+}
+
+TEST_P(VideoStreamBufferControllerTest, FramesFastForwardOnSystemHalt) {
+ StartNextDecodeForceKeyframe();
+ // F1
+ // /
+ // F0 --> F2
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+
+ // Start with a keyframe.
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ time_controller_.AdvanceTime(kFps30Delay);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+ time_controller_.AdvanceTime(kFps30Delay);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(2)
+ .Time(2 * kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+
+ // Halting time should result in F1 being skipped.
+ time_controller_.AdvanceTime(kFps30Delay * 2);
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(2)));
+ EXPECT_EQ(dropped_frames(), 1);
+}
+
+TEST_P(VideoStreamBufferControllerTest, ForceKeyFrame) {
+ StartNextDecodeForceKeyframe();
+ // Initial keyframe.
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ StartNextDecodeForceKeyframe();
+
+ // F2 is the next keyframe, and should be extracted since a keyframe was
+ // forced.
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kFps30Rtp)
+ .AsLast()
+ .Refs({0})
+ .Build());
+ buffer_->InsertFrame(
+ test::FakeFrameBuilder().Id(2).Time(kFps30Rtp * 2).AsLast().Build());
+
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay * 3), Frame(test::WithId(2)));
+}
+
+TEST_P(VideoStreamBufferControllerTest, SlowDecoderDropsTemporalLayers) {
+ StartNextDecodeForceKeyframe();
+  // 2 temporal layers at 15fps each, making 30fps total.
+  // The decoder is slower than 30fps, so some frames will be skipped.
+ // F1 --> F3 --> F5
+ // / / /
+ // F0 --> F2 --> F4
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ // Keyframe received.
+ // Don't start next decode until slow delay.
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ time_controller_.AdvanceTime(kFps30Delay);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(1 * kFps30Rtp)
+ .Refs({0})
+ .AsLast()
+ .Build());
+ time_controller_.AdvanceTime(kFps30Delay);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(2)
+ .Time(2 * kFps30Rtp)
+ .Refs({0})
+ .AsLast()
+ .Build());
+
+ // Simulate decode taking 3x FPS rate.
+ time_controller_.AdvanceTime(kFps30Delay * 1.5);
+ StartNextDecode();
+ // F2 is the best frame since decoding was so slow that F1 is too old.
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay * 2), Frame(test::WithId(2)));
+ EXPECT_EQ(dropped_frames(), 1);
+ time_controller_.AdvanceTime(kFps30Delay / 2);
+
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(3)
+ .Time(3 * kFps30Rtp)
+ .Refs({1, 2})
+ .AsLast()
+ .Build());
+ time_controller_.AdvanceTime(kFps30Delay / 2);
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(4)
+ .Time(4 * kFps30Rtp)
+ .Refs({2})
+ .AsLast()
+ .Build());
+ time_controller_.AdvanceTime(kFps30Delay / 2);
+
+ // F4 is the best frame since decoding was so slow that F1 is too old.
+ time_controller_.AdvanceTime(kFps30Delay);
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(4)));
+
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(5)
+ .Time(5 * kFps30Rtp)
+ .Refs({3, 4})
+ .AsLast()
+ .Build());
+ time_controller_.AdvanceTime(kFps30Delay / 2);
+
+ // F5 is not decodable since F4 was decoded, so a timeout is expected.
+ time_controller_.AdvanceTime(TimeDelta::Millis(10));
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame), TimedOut());
+  // TODO(bugs.webrtc.org/13343): This should be 2 dropped frames since
+  // frames 1 and 3 were dropped. However, frame_buffer2 does not mark frame
+  // 3 as dropped, which is a bug. Uncomment below when that is fixed or
+  // frame_buffer2 is deleted.
+ // EXPECT_EQ(dropped_frames(), 2);
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ NewFrameInsertedWhileWaitingToReleaseFrame) {
+ StartNextDecodeForceKeyframe();
+ // Initial keyframe.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build()));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ time_controller_.AdvanceTime(kFps30Delay / 2);
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kFps30Rtp)
+ .Refs({0})
+ .AsLast()
+ .Build()));
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Eq(absl::nullopt));
+
+  // The scheduler is now waiting to deliver frame 1. Insert frame 2; frame 1
+  // should still be delivered.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(2)
+ .Time(kFps30Rtp * 2)
+ .Refs({0})
+ .AsLast()
+ .Build()));
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(1)));
+}
+
+TEST_P(VideoStreamBufferControllerTest, SameFrameNotScheduledTwice) {
+  // A frame could be scheduled twice if frames arrive out-of-order but the
+  // older frame is old enough to be fast-forwarded.
+ //
+ // 1. F2 arrives and is scheduled.
+ // 2. F3 arrives, but scheduling will not change since F2 is next.
+  // 3. F1 arrives late and scheduling is re-checked since F1 precedes F2. F1
+  //    is fast-forwarded since it is older.
+ //
+ // F2 is the best frame, but should only be scheduled once, followed by F3.
+ StartNextDecodeForceKeyframe();
+
+ // First keyframe.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build()));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Millis(15)),
+ Frame(test::WithId(0)));
+
+ StartNextDecode();
+
+ // F2 arrives and is scheduled.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(2).Time(2 * kFps30Rtp).AsLast().Build()));
+
+ // F3 arrives before F2 is extracted.
+ time_controller_.AdvanceTime(kFps30Delay);
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(3).Time(3 * kFps30Rtp).AsLast().Build()));
+
+ // F1 arrives and is fast-forwarded since it is too late.
+ // F2 is already scheduled and should not be rescheduled.
+ time_controller_.AdvanceTime(kFps30Delay / 2);
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(1).Time(1 * kFps30Rtp).AsLast().Build()));
+
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(2)));
+ StartNextDecode();
+
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(3)));
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame), TimedOut());
+ EXPECT_EQ(dropped_frames(), 1);
+}
+
+TEST_P(VideoStreamBufferControllerTest, TestStatsCallback) {
+ EXPECT_CALL(stats_callback_,
+ OnCompleteFrame(true, kFrameSize, VideoContentType::UNSPECIFIED));
+ EXPECT_CALL(stats_callback_, OnFrameBufferTimingsUpdated);
+
+ // Fake timing having received decoded frame.
+ timing_.StopDecodeTimer(TimeDelta::Millis(1), clock_->CurrentTime());
+ StartNextDecodeForceKeyframe();
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ // Flush stats posted on the decode queue.
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ FrameCompleteCalledOnceForDuplicateFrame) {
+ EXPECT_CALL(stats_callback_,
+ OnCompleteFrame(true, kFrameSize, VideoContentType::UNSPECIFIED))
+ .Times(1);
+
+ StartNextDecodeForceKeyframe();
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build());
+ // Flush stats posted on the decode queue.
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ FrameCompleteCalledOnceForSingleTemporalUnit) {
+ StartNextDecodeForceKeyframe();
+
+  // `OnCompleteFrame` should not be called for the first two frames since
+  // they do not complete the temporal unit.
+ EXPECT_CALL(stats_callback_, OnCompleteFrame(_, _, _)).Times(0);
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).Build());
+ buffer_->InsertFrame(
+ test::FakeFrameBuilder().Id(1).Time(0).Refs({0}).Build());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ // Flush stats posted on the decode queue.
+ ::testing::Mock::VerifyAndClearExpectations(&stats_callback_);
+
+ // Note that this frame is not marked as a keyframe since the last spatial
+ // layer has dependencies.
+ EXPECT_CALL(stats_callback_,
+ OnCompleteFrame(false, kFrameSize, VideoContentType::UNSPECIFIED))
+ .Times(1);
+ buffer_->InsertFrame(
+ test::FakeFrameBuilder().Id(2).Time(0).Refs({0, 1}).AsLast().Build());
+ // Flush stats posted on the decode queue.
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ FrameCompleteCalledOnceForCompleteTemporalUnit) {
+ // FrameBuffer2 logs the complete frame on the arrival of the last layer.
+ StartNextDecodeForceKeyframe();
+
+  // `OnCompleteFrame` should not be called for the first two frames since
+  // they do not complete the temporal unit. Frame 1 arrives later, at which
+  // point the temporal unit can finally be considered complete.
+ EXPECT_CALL(stats_callback_, OnCompleteFrame(_, _, _)).Times(0);
+ buffer_->InsertFrame(test::FakeFrameBuilder().Id(0).Time(0).Build());
+ buffer_->InsertFrame(
+ test::FakeFrameBuilder().Id(2).Time(0).Refs({0, 1}).AsLast().Build());
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ // Flush stats posted on the decode queue.
+ ::testing::Mock::VerifyAndClearExpectations(&stats_callback_);
+
+ EXPECT_CALL(stats_callback_,
+ OnCompleteFrame(false, kFrameSize, VideoContentType::UNSPECIFIED))
+ .Times(1);
+ buffer_->InsertFrame(
+ test::FakeFrameBuilder().Id(1).Time(0).Refs({0}).Build());
+ // Flush stats posted on the decode queue.
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+}
+
+// Note: This test takes a long time to run if the fake metronome is active.
+// Since the test needs to wait for the timestamp to rollover, it has a fake
+// delay of around 6.5 hours. Even though time is simulated, this will be
+// around 1,500,000 metronome tick invocations.
+TEST_P(VideoStreamBufferControllerTest, NextFrameWithOldTimestamp) {
+  // Insert a couple of frames, then pause the stream for a long time before
+  // inserting the next frame.
+ StartNextDecodeForceKeyframe();
+ constexpr uint32_t kBaseRtp = std::numeric_limits<uint32_t>::max() / 2;
+
+ // First keyframe. The receive time must be explicitly set in this test since
+ // the RTP derived time used in all tests does not work when the long pause
+ // happens later in the test.
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(0)
+ .Time(kBaseRtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(0)));
+
+ // 1 more frame to warmup VCMTiming for 30fps.
+ StartNextDecode();
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(1)
+ .Time(kBaseRtp + kFps30Rtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(1)));
+
+ // Pause the stream for such a long time it incurs an RTP timestamp rollover
+ // by over half.
+ constexpr uint32_t kLastRtp = kBaseRtp + kFps30Rtp;
+ constexpr uint32_t kRolloverRtp =
+ kLastRtp + std::numeric_limits<uint32_t>::max() / 2 + 1;
+ constexpr Frequency kRtpHz = Frequency::KiloHertz(90);
+ // Pause for corresponding delay such that RTP timestamp would increase this
+ // much at 30fps.
+ constexpr TimeDelta kRolloverDelay =
+ (std::numeric_limits<uint32_t>::max() / 2 + 1) / kRtpHz;
+
+ // Avoid timeout being set while waiting for the frame and before the receiver
+ // is ready.
+ ResetLastResult();
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame), Eq(absl::nullopt));
+ time_controller_.AdvanceTime(kRolloverDelay - kMaxWaitForFrame);
+ StartNextDecode();
+ buffer_->InsertFrame(test::FakeFrameBuilder()
+ .Id(2)
+ .Time(kRolloverRtp)
+ .ReceivedTime(clock_->CurrentTime())
+ .AsLast()
+ .Build());
+ // FrameBuffer2 drops the frame, while FrameBuffer3 will continue the stream.
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(2)));
+}
+
+TEST_P(VideoStreamBufferControllerTest,
+ FrameNotSetForDecodedIfFrameBufferBecomesNonDecodable) {
+  // This can happen if the frame buffer receives non-standard input. This
+  // test inserts a frame with a missing dependency out-of-order to replicate
+  // it.
+ StartNextDecodeForceKeyframe();
+ // Initial keyframe.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(0).Time(0).SpatialLayer(1).AsLast().Build()));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ // Insert a frame that will become non-decodable.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(11)
+ .Time(kFps30Rtp)
+ .Refs({0})
+ .SpatialLayer(1)
+ .AsLast()
+ .Build()));
+ StartNextDecode();
+  // A second spatial layer arrives out-of-order, after the last layer of the
+  // same frame. This layer requires an older frame that has not yet been
+  // received, so the super-frame is no longer decodable despite already
+  // being scheduled.
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(10)
+ .Time(kFps30Rtp)
+ .SpatialLayer(0)
+ .Refs({2})
+ .Build()));
+ EXPECT_THAT(WaitForFrameOrTimeout(kMaxWaitForFrame), TimedOut());
+
+ // Ensure that this frame can be decoded later.
+ StartNextDecode();
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(test::FakeFrameBuilder()
+ .Id(2)
+ .Time(kFps30Rtp / 2)
+ .SpatialLayer(0)
+ .Refs({0})
+ .AsLast()
+ .Build()));
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(2)));
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(kFps30Delay), Frame(test::WithId(10)));
+}
+
+INSTANTIATE_TEST_SUITE_P(VideoStreamBufferController,
+ VideoStreamBufferControllerTest,
+ ::testing::Combine(::testing::Bool(),
+ ::testing::Values("")),
+ [](const auto& info) {
+ return std::get<0>(info.param) ? "SyncDecoding"
+ : "UnsyncedDecoding";
+ });
+
+class LowLatencyVideoStreamBufferControllerTest
+ : public ::testing::Test,
+ public VideoStreamBufferControllerFixture {};
+
+TEST_P(LowLatencyVideoStreamBufferControllerTest,
+ FramesDecodedInstantlyWithLowLatencyRendering) {
+ // Initial keyframe.
+ StartNextDecodeForceKeyframe();
+ timing_.set_min_playout_delay(TimeDelta::Zero());
+ timing_.set_max_playout_delay(TimeDelta::Millis(10));
+ auto frame = test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build();
+ // Playout delay of 0 implies low-latency rendering.
+ frame->SetPlayoutDelay({0, 10});
+ buffer_->InsertFrame(std::move(frame));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ // Delta frame would normally wait here, but should decode at the pacing rate
+ // in low-latency mode.
+ StartNextDecode();
+ frame = test::FakeFrameBuilder().Id(1).Time(kFps30Rtp).AsLast().Build();
+ frame->SetPlayoutDelay({0, 10});
+ buffer_->InsertFrame(std::move(frame));
+ // Pacing is set to 16ms in the field trial so we should not decode yet.
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Eq(absl::nullopt));
+ time_controller_.AdvanceTime(TimeDelta::Millis(16));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(1)));
+}
+
+TEST_P(LowLatencyVideoStreamBufferControllerTest, ZeroPlayoutDelayFullQueue) {
+ // Initial keyframe.
+ StartNextDecodeForceKeyframe();
+ timing_.set_min_playout_delay(TimeDelta::Zero());
+ timing_.set_max_playout_delay(TimeDelta::Millis(10));
+ auto frame = test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build();
+ // Playout delay of 0 implies low-latency rendering.
+ frame->SetPlayoutDelay({0, 10});
+ buffer_->InsertFrame(std::move(frame));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+  // Queue up 6 frames, one more than the configured max queue size for
+  // 0-playout delay pacing (5).
+ for (int id = 1; id <= 6; ++id) {
+ frame =
+ test::FakeFrameBuilder().Id(id).Time(kFps30Rtp * id).AsLast().Build();
+ frame->SetPlayoutDelay({0, 10});
+ buffer_->InsertFrame(std::move(frame));
+ }
+
+  // The queue is now above the max size for zero playout delay pacing, so
+  // pacing should be ignored and the next frame decoded instantly.
+ StartNextDecode();
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(1)));
+}
+
+TEST_P(LowLatencyVideoStreamBufferControllerTest,
+ MinMaxDelayZeroLowLatencyMode) {
+ // Initial keyframe.
+ StartNextDecodeForceKeyframe();
+ timing_.set_min_playout_delay(TimeDelta::Zero());
+ timing_.set_max_playout_delay(TimeDelta::Zero());
+ auto frame = test::FakeFrameBuilder().Id(0).Time(0).AsLast().Build();
+ // Playout delay of 0 implies low-latency rendering.
+ frame->SetPlayoutDelay({0, 0});
+ buffer_->InsertFrame(std::move(frame));
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(0)));
+
+ // Delta frame would normally wait here, but should decode at the pacing rate
+ // in low-latency mode.
+ StartNextDecode();
+ frame = test::FakeFrameBuilder().Id(1).Time(kFps30Rtp).AsLast().Build();
+ frame->SetPlayoutDelay({0, 0});
+ buffer_->InsertFrame(std::move(frame));
+ // The min/max=0 version of low-latency rendering will result in a large
+ // negative decode wait time, so the frame should be ready right away.
+ EXPECT_THAT(WaitForFrameOrTimeout(TimeDelta::Zero()), Frame(test::WithId(1)));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ VideoStreamBufferController,
+ LowLatencyVideoStreamBufferControllerTest,
+ ::testing::Combine(
+ ::testing::Bool(),
+ ::testing::Values(
+ "WebRTC-ZeroPlayoutDelay/min_pacing:16ms,max_decode_queue_size:5/",
+ "WebRTC-ZeroPlayoutDelay/"
+ "min_pacing:16ms,max_decode_queue_size:5/")));
+
+class IncomingTimestampVideoStreamBufferControllerTest
+ : public ::testing::Test,
+ public VideoStreamBufferControllerFixture {};
+
+TEST_P(IncomingTimestampVideoStreamBufferControllerTest,
+ IncomingTimestampOnMarkerBitOnly) {
+ StartNextDecodeForceKeyframe();
+ EXPECT_CALL(timing_, IncomingTimestampMocked)
+ .Times(field_trials_.IsDisabled("WebRTC-IncomingTimestampOnMarkerBitOnly")
+ ? 3
+ : 1);
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(0).SpatialLayer(0).Time(0).Build()));
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(1).SpatialLayer(1).Time(0).Build()));
+ buffer_->InsertFrame(WithReceiveTimeFromRtpTimestamp(
+ test::FakeFrameBuilder().Id(2).SpatialLayer(2).Time(0).AsLast().Build()));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ VideoStreamBufferController,
+ IncomingTimestampVideoStreamBufferControllerTest,
+ ::testing::Combine(
+ ::testing::Bool(),
+ ::testing::Values(
+ "WebRTC-IncomingTimestampOnMarkerBitOnly/Enabled/",
+ "WebRTC-IncomingTimestampOnMarkerBitOnly/Disabled/")));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_decoder2.cc b/third_party/libwebrtc/video/video_stream_decoder2.cc
new file mode 100644
index 0000000000..1ef2d0ecd0
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_decoder2.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_decoder2.h"
+
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/video_receiver2.h"
+#include "rtc_base/checks.h"
+#include "video/receive_statistics_proxy2.h"
+
+namespace webrtc {
+namespace internal {
+
+VideoStreamDecoder::VideoStreamDecoder(
+ VideoReceiver2* video_receiver,
+ ReceiveStatisticsProxy* receive_statistics_proxy,
+ rtc::VideoSinkInterface<VideoFrame>* incoming_video_stream)
+ : video_receiver_(video_receiver),
+ receive_stats_callback_(receive_statistics_proxy),
+ incoming_video_stream_(incoming_video_stream) {
+ RTC_DCHECK(video_receiver_);
+
+ video_receiver_->RegisterReceiveCallback(this);
+}
+
+VideoStreamDecoder::~VideoStreamDecoder() {
+ // Note: There's an assumption at this point that the decoder thread is
+ // *not* running. If it was, then there could be a race for each of these
+ // callbacks.
+
+ // Unset all the callback pointers that we set in the ctor.
+ video_receiver_->RegisterReceiveCallback(nullptr);
+}
+
+// Do not acquire the lock of `video_receiver_` in this function. Decode
+// callback won't necessarily be called from the decoding thread. The decoding
+// thread may have held the lock when calling VideoDecoder::Decode, Reset, or
+// Release. Acquiring the same lock in the path of decode callback can deadlock.
+int32_t VideoStreamDecoder::FrameToRender(VideoFrame& video_frame,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) {
+ receive_stats_callback_->OnDecodedFrame(video_frame, qp, decode_time,
+ content_type);
+ incoming_video_stream_->OnFrame(video_frame);
+ return 0;
+}
+
+void VideoStreamDecoder::OnDroppedFrames(uint32_t frames_dropped) {
+ receive_stats_callback_->OnDroppedFrames(frames_dropped);
+}
+
+void VideoStreamDecoder::OnIncomingPayloadType(int payload_type) {
+ receive_stats_callback_->OnIncomingPayloadType(payload_type);
+}
+
+void VideoStreamDecoder::OnDecoderInfoChanged(
+ const VideoDecoder::DecoderInfo& decoder_info) {
+ receive_stats_callback_->OnDecoderInfo(decoder_info);
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_decoder2.h b/third_party/libwebrtc/video/video_stream_decoder2.h
new file mode 100644
index 0000000000..473d463186
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_decoder2.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_DECODER2_H_
+#define VIDEO_VIDEO_STREAM_DECODER2_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+class VideoReceiver2;
+
+namespace internal {
+
+class ReceiveStatisticsProxy;
+
+class VideoStreamDecoder : public VCMReceiveCallback {
+ public:
+ VideoStreamDecoder(
+ VideoReceiver2* video_receiver,
+ ReceiveStatisticsProxy* receive_statistics_proxy,
+ rtc::VideoSinkInterface<VideoFrame>* incoming_video_stream);
+ ~VideoStreamDecoder() override;
+
+ // Implements VCMReceiveCallback.
+ int32_t FrameToRender(VideoFrame& video_frame,
+ absl::optional<uint8_t> qp,
+ TimeDelta decode_time,
+ VideoContentType content_type) override;
+ void OnDroppedFrames(uint32_t frames_dropped) override;
+ void OnIncomingPayloadType(int payload_type) override;
+ void OnDecoderInfoChanged(
+ const VideoDecoder::DecoderInfo& decoder_info) override;
+
+ private:
+ VideoReceiver2* const video_receiver_;
+ ReceiveStatisticsProxy* const receive_stats_callback_;
+ rtc::VideoSinkInterface<VideoFrame>* const incoming_video_stream_;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_DECODER2_H_
diff --git a/third_party/libwebrtc/video/video_stream_decoder_impl.cc b/third_party/libwebrtc/video/video_stream_decoder_impl.cc
new file mode 100644
index 0000000000..516aceb680
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_decoder_impl.cc
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_decoder_impl.h"
+
+#include <memory>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/mod_ops.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+VideoStreamDecoderImpl::VideoStreamDecoderImpl(
+ VideoStreamDecoderInterface::Callbacks* callbacks,
+ VideoDecoderFactory* decoder_factory,
+ TaskQueueFactory* task_queue_factory,
+ std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings,
+ const FieldTrialsView* field_trials)
+ : field_trials_(field_trials),
+ timing_(Clock::GetRealTimeClock(), *field_trials_),
+ decode_callbacks_(this),
+ next_frame_info_index_(0),
+ callbacks_(callbacks),
+ keyframe_required_(true),
+ decoder_factory_(decoder_factory),
+ decoder_settings_(std::move(decoder_settings)),
+ shut_down_(false),
+ frame_buffer_(Clock::GetRealTimeClock(), &timing_, *field_trials_),
+ bookkeeping_queue_(task_queue_factory->CreateTaskQueue(
+ "video_stream_decoder_bookkeeping_queue",
+ TaskQueueFactory::Priority::NORMAL)),
+ decode_queue_(task_queue_factory->CreateTaskQueue(
+ "video_stream_decoder_decode_queue",
+ TaskQueueFactory::Priority::NORMAL)) {
+ bookkeeping_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ StartNextDecode();
+ });
+}
+
+VideoStreamDecoderImpl::~VideoStreamDecoderImpl() {
+ MutexLock lock(&shut_down_mutex_);
+ shut_down_ = true;
+}
+
+void VideoStreamDecoderImpl::OnFrame(std::unique_ptr<EncodedFrame> frame) {
+ if (!bookkeeping_queue_.IsCurrent()) {
+ bookkeeping_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+ OnFrame(std::move(frame));
+ return true;
+ });
+
+ return;
+ }
+
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+
+ int64_t continuous_frame_id = frame_buffer_.InsertFrame(std::move(frame));
+ if (last_continuous_frame_id_ < continuous_frame_id) {
+ last_continuous_frame_id_ = continuous_frame_id;
+ callbacks_->OnContinuousUntil(last_continuous_frame_id_);
+ }
+}
+
+void VideoStreamDecoderImpl::SetMinPlayoutDelay(TimeDelta min_delay) {
+ timing_.set_min_playout_delay(min_delay);
+}
+
+void VideoStreamDecoderImpl::SetMaxPlayoutDelay(TimeDelta max_delay) {
+ timing_.set_max_playout_delay(max_delay);
+}
+
+VideoDecoder* VideoStreamDecoderImpl::GetDecoder(int payload_type) {
+ if (current_payload_type_ == payload_type) {
+ RTC_DCHECK(decoder_);
+ return decoder_.get();
+ }
+
+ current_payload_type_.reset();
+ decoder_.reset();
+
+ auto decoder_settings_it = decoder_settings_.find(payload_type);
+ if (decoder_settings_it == decoder_settings_.end()) {
+ RTC_LOG(LS_WARNING) << "Payload type " << payload_type
+ << " not registered.";
+ return nullptr;
+ }
+
+ const SdpVideoFormat& video_format = decoder_settings_it->second.first;
+ std::unique_ptr<VideoDecoder> decoder =
+ decoder_factory_->CreateVideoDecoder(video_format);
+ if (!decoder) {
+ RTC_LOG(LS_WARNING) << "Failed to create decoder for payload type "
+ << payload_type << ".";
+ return nullptr;
+ }
+
+ VideoDecoder::Settings settings;
+ settings.set_number_of_cores(decoder_settings_it->second.second);
+ if (!decoder->Configure(settings)) {
+ RTC_LOG(LS_WARNING) << "Failed to initialize decoder for payload type "
+ << payload_type << ".";
+ return nullptr;
+ }
+
+ int32_t register_result =
+ decoder->RegisterDecodeCompleteCallback(&decode_callbacks_);
+ if (register_result != WEBRTC_VIDEO_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Failed to register decode callback.";
+ return nullptr;
+ }
+
+ current_payload_type_.emplace(payload_type);
+ decoder_ = std::move(decoder);
+ return decoder_.get();
+}
+
+void VideoStreamDecoderImpl::SaveFrameInfo(const EncodedFrame& frame) {
+ FrameInfo* frame_info = &frame_info_[next_frame_info_index_];
+ frame_info->timestamp = frame.Timestamp();
+ frame_info->decode_start_time_ms = rtc::TimeMillis();
+ frame_info->render_time_us = frame.RenderTimeMs() * 1000;
+ frame_info->content_type = frame.EncodedImage().content_type_;
+
+ next_frame_info_index_ = Add<kFrameInfoMemory>(next_frame_info_index_, 1);
+}
+
+void VideoStreamDecoderImpl::StartNextDecode() {
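+  // While a keyframe is required, poll the frame buffer with a short timeout
+  // (200 ms) so that a non-decodable state is reported quickly; otherwise
+  // wait for up to 3000 ms for the next decodable frame.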
+ int64_t max_wait_time = keyframe_required_ ? 200 : 3000;
+
+ frame_buffer_.NextFrame(max_wait_time, keyframe_required_,
+ bookkeeping_queue_.Get(),
+ [this](std::unique_ptr<EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ OnNextFrameCallback(std::move(frame));
+ });
+}
+
+void VideoStreamDecoderImpl::OnNextFrameCallback(
+ std::unique_ptr<EncodedFrame> frame) {
+ if (frame) {
+ SaveFrameInfo(*frame);
+
+ MutexLock lock(&shut_down_mutex_);
+ if (shut_down_) {
+ return;
+ }
+
+ decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ DecodeResult decode_result = DecodeFrame(std::move(frame));
+ bookkeeping_queue_.PostTask([this, decode_result]() {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ switch (decode_result) {
+ case kOk: {
+ keyframe_required_ = false;
+ break;
+ }
+ case kOkRequestKeyframe: {
+ callbacks_->OnNonDecodableState();
+ keyframe_required_ = false;
+ break;
+ }
+ case kDecodeFailure: {
+ callbacks_->OnNonDecodableState();
+ keyframe_required_ = true;
+ break;
+ }
+ }
+ StartNextDecode();
+ });
+ });
+ } else {
+ callbacks_->OnNonDecodableState();
+ // The `frame_buffer_` requires the frame callback function to complete
+ // before NextFrame is called again. For this reason we call
+ // StartNextDecode in a later task to allow this task to complete first.
+ bookkeeping_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ StartNextDecode();
+ });
+ }
+}
+
+VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeFrame(
+ std::unique_ptr<EncodedFrame> frame) {
+ RTC_DCHECK(frame);
+
+ VideoDecoder* decoder = GetDecoder(frame->PayloadType());
+ if (!decoder) {
+ return kDecodeFailure;
+ }
+
+ int32_t decode_result = decoder->Decode(frame->EncodedImage(), //
+ /*missing_frames=*/false, //
+ frame->RenderTimeMs());
+ switch (decode_result) {
+ case WEBRTC_VIDEO_CODEC_OK: {
+ return kOk;
+ }
+ case WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME: {
+ return kOkRequestKeyframe;
+ }
+ default:
+ return kDecodeFailure;
+ }
+}
+
+VideoStreamDecoderImpl::FrameInfo* VideoStreamDecoderImpl::GetFrameInfo(
+ int64_t timestamp) {
+ int start_time_index = next_frame_info_index_;
+ for (int i = 0; i < kFrameInfoMemory; ++i) {
+ start_time_index = Subtract<kFrameInfoMemory>(start_time_index, 1);
+
+ if (frame_info_[start_time_index].timestamp == timestamp)
+ return &frame_info_[start_time_index];
+ }
+
+ return nullptr;
+}
+
+void VideoStreamDecoderImpl::OnDecodedFrameCallback(
+ VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ int64_t decode_stop_time_ms = rtc::TimeMillis();
+
+ bookkeeping_queue_.PostTask([this, decode_stop_time_ms, decoded_image,
+ decode_time_ms, qp]() mutable {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+
+ FrameInfo* frame_info = GetFrameInfo(decoded_image.timestamp());
+ if (!frame_info) {
+ RTC_LOG(LS_ERROR) << "No frame information found for frame with timestamp"
+ << decoded_image.timestamp();
+ return;
+ }
+
+ Callbacks::FrameInfo callback_info;
+ callback_info.content_type = frame_info->content_type;
+
+ if (qp)
+ callback_info.qp.emplace(*qp);
+
+ if (!decode_time_ms) {
+ decode_time_ms = decode_stop_time_ms - frame_info->decode_start_time_ms;
+ }
+ decoded_image.set_processing_time(
+ {Timestamp::Millis(frame_info->decode_start_time_ms),
+ Timestamp::Millis(frame_info->decode_start_time_ms +
+ *decode_time_ms)});
+ decoded_image.set_timestamp_us(frame_info->render_time_us);
+ timing_.StopDecodeTimer(TimeDelta::Millis(*decode_time_ms),
+ Timestamp::Millis(decode_stop_time_ms));
+
+ callbacks_->OnDecodedFrame(decoded_image, callback_info);
+ });
+}
+
+VideoStreamDecoderImpl::DecodeCallbacks::DecodeCallbacks(
+ VideoStreamDecoderImpl* video_stream_decoder_impl)
+ : video_stream_decoder_impl_(video_stream_decoder_impl) {}
+
+int32_t VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
+ VideoFrame& decoded_image) {
+ Decoded(decoded_image, absl::nullopt, absl::nullopt);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
+ VideoFrame& decoded_image,
+ int64_t decode_time_ms) {
+ Decoded(decoded_image, decode_time_ms, absl::nullopt);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
+ VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ video_stream_decoder_impl_->OnDecodedFrameCallback(decoded_image,
+ decode_time_ms, qp);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_decoder_impl.h b/third_party/libwebrtc/video/video_stream_decoder_impl.h
new file mode 100644
index 0000000000..fcd7158391
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_decoder_impl.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_DECODER_IMPL_H_
+#define VIDEO_VIDEO_STREAM_DECODER_IMPL_H_
+
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/video_stream_decoder.h"
+#include "modules/video_coding/frame_buffer2.h"
+#include "modules/video_coding/timing/timing.h"
+#include "rtc_base/memory/always_valid_pointer.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class VideoStreamDecoderImpl : public VideoStreamDecoderInterface {
+ public:
+ VideoStreamDecoderImpl(
+ VideoStreamDecoderInterface::Callbacks* callbacks,
+ VideoDecoderFactory* decoder_factory,
+ TaskQueueFactory* task_queue_factory,
+ std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings,
+ const FieldTrialsView* field_trials);
+
+ ~VideoStreamDecoderImpl() override;
+
+ void OnFrame(std::unique_ptr<EncodedFrame> frame) override;
+
+ void SetMinPlayoutDelay(TimeDelta min_delay) override;
+ void SetMaxPlayoutDelay(TimeDelta max_delay) override;
+
+ private:
+ class DecodeCallbacks : public DecodedImageCallback {
+ public:
+ explicit DecodeCallbacks(VideoStreamDecoderImpl* video_stream_decoder_impl);
+ int32_t Decoded(VideoFrame& decodedImage) override;
+ int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
+ void Decoded(VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override;
+
+ private:
+ VideoStreamDecoderImpl* const video_stream_decoder_impl_;
+ };
+
+ enum DecodeResult {
+ kOk,
+ kOkRequestKeyframe,
+ kDecodeFailure,
+ };
+
+ struct FrameInfo {
+ int64_t timestamp = -1;
+ int64_t decode_start_time_ms;
+ int64_t render_time_us;
+ VideoContentType content_type;
+ };
+
+ void SaveFrameInfo(const EncodedFrame& frame) RTC_RUN_ON(bookkeeping_queue_);
+ FrameInfo* GetFrameInfo(int64_t timestamp) RTC_RUN_ON(bookkeeping_queue_);
+ void StartNextDecode() RTC_RUN_ON(bookkeeping_queue_);
+ void OnNextFrameCallback(std::unique_ptr<EncodedFrame> frame)
+ RTC_RUN_ON(bookkeeping_queue_);
+ void OnDecodedFrameCallback(VideoFrame& decodedImage, // NOLINT
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp);
+
+ VideoDecoder* GetDecoder(int payload_type) RTC_RUN_ON(decode_queue_);
+ VideoStreamDecoderImpl::DecodeResult DecodeFrame(
+ std::unique_ptr<EncodedFrame> frame) RTC_RUN_ON(decode_queue_);
+
+ AlwaysValidPointer<const FieldTrialsView, FieldTrialBasedConfig>
+ field_trials_;
+ VCMTiming timing_;
+ DecodeCallbacks decode_callbacks_;
+
+  // Some decoders are pipelined, so it is not sufficient to save frame info
+  // for the last frame only.
+ static constexpr int kFrameInfoMemory = 8;
+ std::array<FrameInfo, kFrameInfoMemory> frame_info_
+ RTC_GUARDED_BY(bookkeeping_queue_);
+ int next_frame_info_index_ RTC_GUARDED_BY(bookkeeping_queue_);
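+  // A minimal sketch of the ring-buffer bookkeeping above, assuming a
+  // pipelined decoder with two frames in flight (illustrative only):
+  //   SaveFrameInfo(frame_a);             // writes frame_info_[0], index -> 1
+  //   SaveFrameInfo(frame_b);             // writes frame_info_[1], index -> 2
+  //   GetFrameInfo(frame_a.Timestamp());  // scans backwards modulo
+  //                                       // kFrameInfoMemory, finds slot 0.
+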
+ VideoStreamDecoderInterface::Callbacks* const callbacks_
+ RTC_PT_GUARDED_BY(bookkeeping_queue_);
+ int64_t last_continuous_frame_id_ RTC_GUARDED_BY(bookkeeping_queue_) = -1;
+ bool keyframe_required_ RTC_GUARDED_BY(bookkeeping_queue_);
+
+ absl::optional<int> current_payload_type_ RTC_GUARDED_BY(decode_queue_);
+ VideoDecoderFactory* const decoder_factory_ RTC_PT_GUARDED_BY(decode_queue_);
+ std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings_
+ RTC_GUARDED_BY(decode_queue_);
+
+  // The `bookkeeping_queue_` uses the `frame_buffer_` and also posts tasks to
+  // the `decode_queue_`. The `decode_queue_`, in turn, uses the `decoder_` to
+  // decode frames. When the `decoder_` is done it posts back to the
+  // `bookkeeping_queue_` with the decoded frame. During shutdown we start by
+  // isolating the `bookkeeping_queue_` from the `decode_queue_`, so that it
+  // is safe for the `decode_queue_` to be destructed. After that the
+  // `decoder_` can be destructed, then the `bookkeeping_queue_`, and finally
+  // the `frame_buffer_`.
+ Mutex shut_down_mutex_;
+ bool shut_down_ RTC_GUARDED_BY(shut_down_mutex_);
+ video_coding::FrameBuffer frame_buffer_ RTC_GUARDED_BY(bookkeeping_queue_);
+ rtc::TaskQueue bookkeeping_queue_;
+ std::unique_ptr<VideoDecoder> decoder_ RTC_GUARDED_BY(decode_queue_);
+ rtc::TaskQueue decode_queue_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_DECODER_IMPL_H_
diff --git a/third_party/libwebrtc/video/video_stream_decoder_impl_unittest.cc b/third_party/libwebrtc/video/video_stream_decoder_impl_unittest.cc
new file mode 100644
index 0000000000..d0cf9255c3
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_decoder_impl_unittest.cc
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_decoder_impl.h"
+
+#include <vector>
+
+#include "api/video/i420_buffer.h"
+#include "api/video_codecs/video_decoder.h"
+#include "test/fake_encoded_frame.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace {
+using ::testing::_;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+class MockVideoStreamDecoderCallbacks
+ : public VideoStreamDecoderInterface::Callbacks {
+ public:
+ MOCK_METHOD(void, OnNonDecodableState, (), (override));
+ MOCK_METHOD(void, OnContinuousUntil, (int64_t frame_id), (override));
+ MOCK_METHOD(
+ void,
+ OnDecodedFrame,
+ (VideoFrame frame,
+ const VideoStreamDecoderInterface::Callbacks::FrameInfo& frame_info),
+ (override));
+};
+
+class StubVideoDecoder : public VideoDecoder {
+ public:
+ StubVideoDecoder() { ON_CALL(*this, Configure).WillByDefault(Return(true)); }
+
+ MOCK_METHOD(bool, Configure, (const Settings&), (override));
+
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override {
+ int32_t ret_code = DecodeCall(input_image, missing_frames, render_time_ms);
+ if (ret_code == WEBRTC_VIDEO_CODEC_OK ||
+ ret_code == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
+ VideoFrame frame = VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(1, 1))
+ .build();
+ callback_->Decoded(frame);
+ }
+ return ret_code;
+ }
+
+ MOCK_METHOD(int32_t,
+ DecodeCall,
+ (const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms),
+ ());
+
+ int32_t Release() override { return 0; }
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override {
+ callback_ = callback;
+ return 0;
+ }
+
+ private:
+  DecodedImageCallback* callback_ = nullptr;
+};
+
+class WrappedVideoDecoder : public VideoDecoder {
+ public:
+ explicit WrappedVideoDecoder(StubVideoDecoder* decoder) : decoder_(decoder) {}
+
+ bool Configure(const Settings& settings) override {
+ return decoder_->Configure(settings);
+ }
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override {
+ return decoder_->Decode(input_image, missing_frames, render_time_ms);
+ }
+ int32_t Release() override { return decoder_->Release(); }
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override {
+ return decoder_->RegisterDecodeCompleteCallback(callback);
+ }
+
+ private:
+ StubVideoDecoder* decoder_;
+};
+
+class FakeVideoDecoderFactory : public VideoDecoderFactory {
+ public:
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override {
+ return {};
+ }
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ const SdpVideoFormat& format) override {
+ if (format.name == "VP8") {
+ return std::make_unique<WrappedVideoDecoder>(&vp8_decoder_);
+ }
+
+ if (format.name == "AV1") {
+ return std::make_unique<WrappedVideoDecoder>(&av1_decoder_);
+ }
+
+ return {};
+ }
+
+ StubVideoDecoder& Vp8Decoder() { return vp8_decoder_; }
+ StubVideoDecoder& Av1Decoder() { return av1_decoder_; }
+
+ private:
+ NiceMock<StubVideoDecoder> vp8_decoder_;
+ NiceMock<StubVideoDecoder> av1_decoder_;
+};
+
+class VideoStreamDecoderImplTest : public ::testing::Test {
+ public:
+ VideoStreamDecoderImplTest()
+ : time_controller_(Timestamp::Seconds(0)),
+ video_stream_decoder_(&callbacks_,
+ &decoder_factory_,
+ time_controller_.GetTaskQueueFactory(),
+ {{1, std::make_pair(SdpVideoFormat("VP8"), 1)},
+ {2, std::make_pair(SdpVideoFormat("AV1"), 1)}},
+ &field_trials_) {
+    // Set the min playout delay to a value greater than zero to avoid
+    // activating the low-latency renderer.
+ video_stream_decoder_.SetMinPlayoutDelay(TimeDelta::Millis(10));
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ NiceMock<MockVideoStreamDecoderCallbacks> callbacks_;
+ FakeVideoDecoderFactory decoder_factory_;
+ GlobalSimulatedTimeController time_controller_;
+ VideoStreamDecoderImpl video_stream_decoder_;
+};
+
+TEST_F(VideoStreamDecoderImplTest, InsertAndDecodeFrame) {
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder().PayloadType(1).AsLast().Build());
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, NonDecodableStateWaitingForKeyframe) {
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(200));
+}
+
+TEST_F(VideoStreamDecoderImplTest, NonDecodableStateWaitingForDeltaFrame) {
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder().PayloadType(1).AsLast().Build());
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(3000));
+}
+
+TEST_F(VideoStreamDecoderImplTest, InsertAndDecodeFrameWithKeyframeRequest) {
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder().PayloadType(1).AsLast().Build());
+ EXPECT_CALL(decoder_factory_.Vp8Decoder(), DecodeCall)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME));
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, FailToInitDecoder) {
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder()
+ .ReceivedTime(time_controller_.GetClock()->CurrentTime())
+ .PayloadType(1)
+ .AsLast()
+ .Build());
+ ON_CALL(decoder_factory_.Vp8Decoder(), Configure)
+ .WillByDefault(Return(false));
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, FailToDecodeFrame) {
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder().PayloadType(1).AsLast().Build());
+ ON_CALL(decoder_factory_.Vp8Decoder(), DecodeCall)
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ERROR));
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, ChangeFramePayloadType) {
+ constexpr TimeDelta kFrameInterval = TimeDelta::Millis(1000 / 60);
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder().PayloadType(1).Id(0).AsLast().Build());
+ EXPECT_CALL(decoder_factory_.Vp8Decoder(), DecodeCall);
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(kFrameInterval);
+
+ video_stream_decoder_.OnFrame(
+ test::FakeFrameBuilder().PayloadType(2).Id(1).AsLast().Build());
+ EXPECT_CALL(decoder_factory_.Av1Decoder(), DecodeCall);
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(kFrameInterval);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_encoder.cc b/third_party/libwebrtc/video/video_stream_encoder.cc
new file mode 100644
index 0000000000..c680fe12c8
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder.cc
@@ -0,0 +1,2597 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_encoder.h"
+
+#include <algorithm>
+#include <array>
+#include <limits>
+#include <memory>
+#include <numeric>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/cleanup/cleanup.h"
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/encoded_image.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/render_resolution.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_layers_allocation.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/resource_adaptation_processor.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "media/base/media_channel.h"
+#include "modules/video_coding/include/video_codec_initializer.h"
+#include "modules/video_coding/svc/svc_rate_allocator.h"
+#include "modules/video_coding/utility/vp8_constants.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/alr_experiment.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/metrics.h"
+#include "video/adaptation/video_stream_encoder_resource_manager.h"
+#include "video/alignment_adjuster.h"
+#include "video/config/encoder_stream_factory.h"
+#include "video/frame_cadence_adapter.h"
+
+namespace webrtc {
+
+namespace {
+
+// Time interval for logging frame counts.
+const int64_t kFrameLogIntervalMs = 60000;
+
+// Time to keep a single cached pending frame in paused state.
+const int64_t kPendingFrameTimeoutMs = 1000;
+
+constexpr char kFrameDropperFieldTrial[] = "WebRTC-FrameDropper";
+
+// TODO(bugs.webrtc.org/13572): Remove this kill switch after deploying the
+// feature.
+constexpr char kSwitchEncoderOnInitializationFailuresFieldTrial[] =
+ "WebRTC-SwitchEncoderOnInitializationFailures";
+
+const size_t kDefaultPayloadSize = 1440;
+
+const int64_t kParameterUpdateIntervalMs = 1000;
+
+// Animation is capped to 720p.
+constexpr int kMaxAnimationPixels = 1280 * 720;
+
+constexpr int kDefaultMinScreenSharebps = 1200000;
+
+bool RequiresEncoderReset(const VideoCodec& prev_send_codec,
+ const VideoCodec& new_send_codec,
+ bool was_encode_called_since_last_initialization) {
+ // Does not check max/minBitrate or maxFramerate.
+ if (new_send_codec.codecType != prev_send_codec.codecType ||
+ new_send_codec.width != prev_send_codec.width ||
+ new_send_codec.height != prev_send_codec.height ||
+ new_send_codec.qpMax != prev_send_codec.qpMax ||
+ new_send_codec.numberOfSimulcastStreams !=
+ prev_send_codec.numberOfSimulcastStreams ||
+ new_send_codec.mode != prev_send_codec.mode ||
+ new_send_codec.GetFrameDropEnabled() !=
+ prev_send_codec.GetFrameDropEnabled()) {
+ return true;
+ }
+
+ if (!was_encode_called_since_last_initialization &&
+ (new_send_codec.startBitrate != prev_send_codec.startBitrate)) {
+    // If the start bitrate has changed, reconfigure the encoder only if
+    // encoding has not yet started.
+ return true;
+ }
+
+ switch (new_send_codec.codecType) {
+ case kVideoCodecVP8:
+ if (new_send_codec.VP8() != prev_send_codec.VP8()) {
+ return true;
+ }
+ break;
+
+ case kVideoCodecVP9:
+ if (new_send_codec.VP9() != prev_send_codec.VP9()) {
+ return true;
+ }
+ break;
+
+ case kVideoCodecH264:
+ if (new_send_codec.H264() != prev_send_codec.H264()) {
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ for (unsigned char i = 0; i < new_send_codec.numberOfSimulcastStreams; ++i) {
+ if (!new_send_codec.simulcastStream[i].active) {
+ // No need to reset when stream is inactive.
+ continue;
+ }
+
+ if (!prev_send_codec.simulcastStream[i].active ||
+ new_send_codec.simulcastStream[i].width !=
+ prev_send_codec.simulcastStream[i].width ||
+ new_send_codec.simulcastStream[i].height !=
+ prev_send_codec.simulcastStream[i].height ||
+ new_send_codec.simulcastStream[i].numberOfTemporalLayers !=
+ prev_send_codec.simulcastStream[i].numberOfTemporalLayers ||
+ new_send_codec.simulcastStream[i].qpMax !=
+ prev_send_codec.simulcastStream[i].qpMax) {
+ return true;
+ }
+ }
+
+ if (new_send_codec.codecType == kVideoCodecVP9) {
+ size_t num_spatial_layers = new_send_codec.VP9().numberOfSpatialLayers;
+ for (unsigned char i = 0; i < num_spatial_layers; ++i) {
+ if (!new_send_codec.spatialLayers[i].active) {
+ // No need to reset when layer is inactive.
+ continue;
+ }
+ if (new_send_codec.spatialLayers[i].width !=
+ prev_send_codec.spatialLayers[i].width ||
+ new_send_codec.spatialLayers[i].height !=
+ prev_send_codec.spatialLayers[i].height ||
+ new_send_codec.spatialLayers[i].numberOfTemporalLayers !=
+ prev_send_codec.spatialLayers[i].numberOfTemporalLayers ||
+ new_send_codec.spatialLayers[i].qpMax !=
+ prev_send_codec.spatialLayers[i].qpMax ||
+ !prev_send_codec.spatialLayers[i].active) {
+ return true;
+ }
+ }
+ }
+
+ if (new_send_codec.GetScalabilityMode() !=
+ prev_send_codec.GetScalabilityMode()) {
+ return true;
+ }
+
+ return false;
+}
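+
+// For example (illustrative): toggling a simulcast layer's width or height
+// above forces a reset, while changing only min/max bitrate or maxFramerate
+// does not.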
+
+std::array<uint8_t, 2> GetExperimentGroups() {
+ std::array<uint8_t, 2> experiment_groups;
+ absl::optional<AlrExperimentSettings> experiment_settings =
+ AlrExperimentSettings::CreateFromFieldTrial(
+ AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
+ if (experiment_settings) {
+ experiment_groups[0] = experiment_settings->group_id + 1;
+ } else {
+ experiment_groups[0] = 0;
+ }
+ experiment_settings = AlrExperimentSettings::CreateFromFieldTrial(
+ AlrExperimentSettings::kScreenshareProbingBweExperimentName);
+ if (experiment_settings) {
+ experiment_groups[1] = experiment_settings->group_id + 1;
+ } else {
+ experiment_groups[1] = 0;
+ }
+ return experiment_groups;
+}
+
+// Limits the bitrate allocation across temporal layers (TLs) according to the
+// number of TLs reported in EncoderInfo.
+VideoBitrateAllocation UpdateAllocationFromEncoderInfo(
+ const VideoBitrateAllocation& allocation,
+ const VideoEncoder::EncoderInfo& encoder_info) {
+ if (allocation.get_sum_bps() == 0) {
+ return allocation;
+ }
+ VideoBitrateAllocation new_allocation;
+ for (int si = 0; si < kMaxSpatialLayers; ++si) {
+ if (encoder_info.fps_allocation[si].size() == 1 &&
+ allocation.IsSpatialLayerUsed(si)) {
+ // One TL is signalled to be used by the encoder. Do not distribute
+ // bitrate allocation across TLs (use sum at ti:0).
+ new_allocation.SetBitrate(si, 0, allocation.GetSpatialLayerSum(si));
+ } else {
+ for (int ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (allocation.HasBitrate(si, ti))
+ new_allocation.SetBitrate(si, ti, allocation.GetBitrate(si, ti));
+ }
+ }
+ }
+ new_allocation.set_bw_limited(allocation.is_bw_limited());
+ return new_allocation;
+}
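+
+// Illustrative example (hypothetical rates): if fps_allocation[0] reports a
+// single temporal layer, an input allocation of {(0,0): 300 kbps,
+// (0,1): 200 kbps} collapses above to {(0,0): 500 kbps}; otherwise the per-TL
+// bitrates are copied through unchanged.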
+
+// Converts a VideoBitrateAllocation, which contains the allocated bitrate per
+// layer, and an EncoderInfo, which describes the actual encoder structure
+// used by a codec, into a VideoLayersAllocation. Stream structures can be
+// K-SVC, full SVC, simulcast, etc.
+VideoLayersAllocation CreateVideoLayersAllocation(
+ const VideoCodec& encoder_config,
+ const VideoEncoder::RateControlParameters& current_rate,
+ const VideoEncoder::EncoderInfo& encoder_info) {
+ const VideoBitrateAllocation& target_bitrate = current_rate.target_bitrate;
+ VideoLayersAllocation layers_allocation;
+ if (target_bitrate.get_sum_bps() == 0) {
+ return layers_allocation;
+ }
+
+ if (encoder_config.numberOfSimulcastStreams > 1) {
+ layers_allocation.resolution_and_frame_rate_is_valid = true;
+ for (int si = 0; si < encoder_config.numberOfSimulcastStreams; ++si) {
+ if (!target_bitrate.IsSpatialLayerUsed(si) ||
+ target_bitrate.GetSpatialLayerSum(si) == 0) {
+ continue;
+ }
+ layers_allocation.active_spatial_layers.emplace_back();
+ VideoLayersAllocation::SpatialLayer& spatial_layer =
+ layers_allocation.active_spatial_layers.back();
+ spatial_layer.width = encoder_config.simulcastStream[si].width;
+ spatial_layer.height = encoder_config.simulcastStream[si].height;
+ spatial_layer.rtp_stream_index = si;
+ spatial_layer.spatial_id = 0;
+ auto frame_rate_fraction =
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction;
+ if (encoder_info.fps_allocation[si].size() == 1) {
+ // One TL is signalled to be used by the encoder. Do not distribute
+ // bitrate allocation across TLs (use sum at tl:0).
+ spatial_layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::BitsPerSec(target_bitrate.GetSpatialLayerSum(si)));
+ frame_rate_fraction = encoder_info.fps_allocation[si][0];
+ } else { // Temporal layers are supported.
+ uint32_t temporal_layer_bitrate_bps = 0;
+ for (size_t ti = 0;
+ ti < encoder_config.simulcastStream[si].numberOfTemporalLayers;
+ ++ti) {
+ if (!target_bitrate.HasBitrate(si, ti)) {
+ break;
+ }
+ if (ti < encoder_info.fps_allocation[si].size()) {
+ // Use frame rate of the top used temporal layer.
+ frame_rate_fraction = encoder_info.fps_allocation[si][ti];
+ }
+ temporal_layer_bitrate_bps += target_bitrate.GetBitrate(si, ti);
+ spatial_layer.target_bitrate_per_temporal_layer.push_back(
+ DataRate::BitsPerSec(temporal_layer_bitrate_bps));
+ }
+ }
+ // Encoder may drop frames internally if `maxFramerate` is set.
+ spatial_layer.frame_rate_fps = std::min<uint8_t>(
+ encoder_config.simulcastStream[si].maxFramerate,
+ rtc::saturated_cast<uint8_t>(
+ (current_rate.framerate_fps * frame_rate_fraction) /
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction));
+ }
+ } else if (encoder_config.numberOfSimulcastStreams == 1) {
+ // TODO(bugs.webrtc.org/12000): Implement support for AV1 with
+ // scalability.
+ const bool higher_spatial_depend_on_lower =
+ encoder_config.codecType == kVideoCodecVP9 &&
+ encoder_config.VP9().interLayerPred == InterLayerPredMode::kOn;
+ layers_allocation.resolution_and_frame_rate_is_valid = true;
+
+ std::vector<DataRate> aggregated_spatial_bitrate(
+ webrtc::kMaxTemporalStreams, DataRate::Zero());
+ for (int si = 0; si < webrtc::kMaxSpatialLayers; ++si) {
+ if (!target_bitrate.IsSpatialLayerUsed(si) ||
+ target_bitrate.GetSpatialLayerSum(si) == 0) {
+ break;
+ }
+ layers_allocation.active_spatial_layers.emplace_back();
+ VideoLayersAllocation::SpatialLayer& spatial_layer =
+ layers_allocation.active_spatial_layers.back();
+ spatial_layer.width = encoder_config.spatialLayers[si].width;
+ spatial_layer.height = encoder_config.spatialLayers[si].height;
+ spatial_layer.rtp_stream_index = 0;
+ spatial_layer.spatial_id = si;
+ auto frame_rate_fraction =
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction;
+ if (encoder_info.fps_allocation[si].size() == 1) {
+ // One TL is signalled to be used by the encoder. Do not distribute
+ // bitrate allocation across TLs (use sum at tl:0).
+ DataRate aggregated_temporal_bitrate =
+ DataRate::BitsPerSec(target_bitrate.GetSpatialLayerSum(si));
+ aggregated_spatial_bitrate[0] += aggregated_temporal_bitrate;
+ if (higher_spatial_depend_on_lower) {
+ spatial_layer.target_bitrate_per_temporal_layer.push_back(
+ aggregated_spatial_bitrate[0]);
+ } else {
+ spatial_layer.target_bitrate_per_temporal_layer.push_back(
+ aggregated_temporal_bitrate);
+ }
+ frame_rate_fraction = encoder_info.fps_allocation[si][0];
+ } else { // Temporal layers are supported.
+ DataRate aggregated_temporal_bitrate = DataRate::Zero();
+ for (size_t ti = 0;
+ ti < encoder_config.spatialLayers[si].numberOfTemporalLayers;
+ ++ti) {
+ if (!target_bitrate.HasBitrate(si, ti)) {
+ break;
+ }
+ if (ti < encoder_info.fps_allocation[si].size()) {
+ // Use frame rate of the top used temporal layer.
+ frame_rate_fraction = encoder_info.fps_allocation[si][ti];
+ }
+ aggregated_temporal_bitrate +=
+ DataRate::BitsPerSec(target_bitrate.GetBitrate(si, ti));
+ if (higher_spatial_depend_on_lower) {
+ spatial_layer.target_bitrate_per_temporal_layer.push_back(
+ aggregated_temporal_bitrate + aggregated_spatial_bitrate[ti]);
+ aggregated_spatial_bitrate[ti] += aggregated_temporal_bitrate;
+ } else {
+ spatial_layer.target_bitrate_per_temporal_layer.push_back(
+ aggregated_temporal_bitrate);
+ }
+ }
+ }
+ // Encoder may drop frames internally if `maxFramerate` is set.
+ spatial_layer.frame_rate_fps = std::min<uint8_t>(
+ encoder_config.spatialLayers[si].maxFramerate,
+ rtc::saturated_cast<uint8_t>(
+ (current_rate.framerate_fps * frame_rate_fraction) /
+ VideoEncoder::EncoderInfo::kMaxFramerateFraction));
+ }
+ }
+
+ return layers_allocation;
+}
+
+VideoEncoder::EncoderInfo GetEncoderInfoWithBitrateLimitUpdate(
+ const VideoEncoder::EncoderInfo& info,
+ const VideoEncoderConfig& encoder_config,
+ bool default_limits_allowed) {
+ if (!default_limits_allowed || !info.resolution_bitrate_limits.empty() ||
+ encoder_config.simulcast_layers.size() <= 1) {
+ return info;
+ }
+  // Bitrate limits are not configured and more than one layer is used; use
+  // the default limits (bitrate limits are not used for simulcast).
+ VideoEncoder::EncoderInfo new_info = info;
+ new_info.resolution_bitrate_limits =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimits(
+ encoder_config.codec_type);
+ return new_info;
+}
+
+int NumActiveStreams(const std::vector<VideoStream>& streams) {
+ int num_active = 0;
+ for (const auto& stream : streams) {
+ if (stream.active)
+ ++num_active;
+ }
+ return num_active;
+}
+
+void ApplyVp9BitrateLimits(const VideoEncoder::EncoderInfo& encoder_info,
+ const VideoEncoderConfig& encoder_config,
+ VideoCodec* codec) {
+ if (codec->codecType != VideoCodecType::kVideoCodecVP9 ||
+ encoder_config.simulcast_layers.size() <= 1 ||
+ VideoStreamEncoderResourceManager::IsSimulcastOrMultipleSpatialLayers(
+ encoder_config)) {
+ // Resolution bitrate limits usage is restricted to singlecast.
+ return;
+ }
+
+ // Get bitrate limits for active stream.
+ absl::optional<uint32_t> pixels =
+ VideoStreamAdapter::GetSingleActiveLayerPixels(*codec);
+ if (!pixels.has_value()) {
+ return;
+ }
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> bitrate_limits =
+ encoder_info.GetEncoderBitrateLimitsForResolution(*pixels);
+ if (!bitrate_limits.has_value()) {
+ return;
+ }
+
+ // Index for the active stream.
+ absl::optional<size_t> index;
+ for (size_t i = 0; i < encoder_config.simulcast_layers.size(); ++i) {
+ if (encoder_config.simulcast_layers[i].active)
+ index = i;
+ }
+ if (!index.has_value()) {
+ return;
+ }
+
+ int min_bitrate_bps;
+ if (encoder_config.simulcast_layers[*index].min_bitrate_bps <= 0) {
+ min_bitrate_bps = bitrate_limits->min_bitrate_bps;
+ } else {
+ min_bitrate_bps =
+ std::max(bitrate_limits->min_bitrate_bps,
+ encoder_config.simulcast_layers[*index].min_bitrate_bps);
+ }
+ int max_bitrate_bps;
+ if (encoder_config.simulcast_layers[*index].max_bitrate_bps <= 0) {
+ max_bitrate_bps = bitrate_limits->max_bitrate_bps;
+ } else {
+ max_bitrate_bps =
+ std::min(bitrate_limits->max_bitrate_bps,
+ encoder_config.simulcast_layers[*index].max_bitrate_bps);
+ }
+ if (min_bitrate_bps >= max_bitrate_bps) {
+ RTC_LOG(LS_WARNING) << "Bitrate limits not used, min_bitrate_bps "
+ << min_bitrate_bps << " >= max_bitrate_bps "
+ << max_bitrate_bps;
+ return;
+ }
+
+ for (int i = 0; i < codec->VP9()->numberOfSpatialLayers; ++i) {
+ if (codec->spatialLayers[i].active) {
+ codec->spatialLayers[i].minBitrate = min_bitrate_bps / 1000;
+ codec->spatialLayers[i].maxBitrate = max_bitrate_bps / 1000;
+ codec->spatialLayers[i].targetBitrate =
+ std::min(codec->spatialLayers[i].targetBitrate,
+ codec->spatialLayers[i].maxBitrate);
+ break;
+ }
+ }
+}
+
+void ApplyEncoderBitrateLimitsIfSingleActiveStream(
+ const VideoEncoder::EncoderInfo& encoder_info,
+ const std::vector<VideoStream>& encoder_config_layers,
+ std::vector<VideoStream>* streams) {
+  // Apply limits if simulcast with one active stream (except the lowest).
+ bool single_active_stream =
+ streams->size() > 1 && NumActiveStreams(*streams) == 1 &&
+ !streams->front().active && NumActiveStreams(encoder_config_layers) == 1;
+ if (!single_active_stream) {
+ return;
+ }
+
+ // Index for the active stream.
+ size_t index = 0;
+ for (size_t i = 0; i < encoder_config_layers.size(); ++i) {
+ if (encoder_config_layers[i].active)
+ index = i;
+ }
+ if (streams->size() < (index + 1) || !(*streams)[index].active) {
+ return;
+ }
+
+ // Get bitrate limits for active stream.
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> encoder_bitrate_limits =
+ encoder_info.GetEncoderBitrateLimitsForResolution(
+ (*streams)[index].width * (*streams)[index].height);
+ if (!encoder_bitrate_limits) {
+ return;
+ }
+
+ // If bitrate limits are set by RtpEncodingParameters, use intersection.
+ int min_bitrate_bps;
+ if (encoder_config_layers[index].min_bitrate_bps <= 0) {
+ min_bitrate_bps = encoder_bitrate_limits->min_bitrate_bps;
+ } else {
+ min_bitrate_bps = std::max(encoder_bitrate_limits->min_bitrate_bps,
+ (*streams)[index].min_bitrate_bps);
+ }
+ int max_bitrate_bps;
+ if (encoder_config_layers[index].max_bitrate_bps <= 0) {
+ max_bitrate_bps = encoder_bitrate_limits->max_bitrate_bps;
+ } else {
+ max_bitrate_bps = std::min(encoder_bitrate_limits->max_bitrate_bps,
+ (*streams)[index].max_bitrate_bps);
+ }
+ if (min_bitrate_bps >= max_bitrate_bps) {
+ RTC_LOG(LS_WARNING) << "Encoder bitrate limits"
+ << " (min=" << encoder_bitrate_limits->min_bitrate_bps
+ << ", max=" << encoder_bitrate_limits->max_bitrate_bps
+ << ") do not intersect with stream limits"
+ << " (min=" << (*streams)[index].min_bitrate_bps
+ << ", max=" << (*streams)[index].max_bitrate_bps
+ << "). Encoder bitrate limits not used.";
+ return;
+ }
+
+ (*streams)[index].min_bitrate_bps = min_bitrate_bps;
+ (*streams)[index].max_bitrate_bps = max_bitrate_bps;
+ (*streams)[index].target_bitrate_bps =
+ std::min((*streams)[index].target_bitrate_bps,
+ encoder_bitrate_limits->max_bitrate_bps);
+}
+
+absl::optional<int> ParseVp9LowTierCoreCountThreshold(
+ const FieldTrialsView& trials) {
+ FieldTrialFlag disable_low_tier("Disabled");
+ FieldTrialParameter<int> max_core_count("max_core_count", 2);
+ ParseFieldTrial({&disable_low_tier, &max_core_count},
+ trials.Lookup("WebRTC-VP9-LowTierOptimizations"));
+ if (disable_low_tier.Get()) {
+ return absl::nullopt;
+ }
+ return max_core_count.Get();
+}
+
+absl::optional<VideoSourceRestrictions> MergeRestrictions(
+ const std::vector<absl::optional<VideoSourceRestrictions>>& list) {
+ absl::optional<VideoSourceRestrictions> return_value;
+ for (const auto& res : list) {
+ if (!res) {
+ continue;
+ }
+ if (!return_value) {
+ return_value = *res;
+ continue;
+ }
+ return_value->UpdateMin(*res);
+ }
+ return return_value;
+}
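+
+// Illustrative example (hypothetical values): merging a restriction that only
+// sets max_pixels_per_frame with one that only sets max_frame_rate yields a
+// single restriction carrying both; where both inputs set the same field,
+// UpdateMin() keeps the more restrictive value.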
+
+} // namespace
+
+VideoStreamEncoder::EncoderRateSettings::EncoderRateSettings()
+ : rate_control(),
+ encoder_target(DataRate::Zero()),
+ stable_encoder_target(DataRate::Zero()) {}
+
+VideoStreamEncoder::EncoderRateSettings::EncoderRateSettings(
+ const VideoBitrateAllocation& bitrate,
+ double framerate_fps,
+ DataRate bandwidth_allocation,
+ DataRate encoder_target,
+ DataRate stable_encoder_target)
+ : rate_control(bitrate, framerate_fps, bandwidth_allocation),
+ encoder_target(encoder_target),
+ stable_encoder_target(stable_encoder_target) {}
+
+bool VideoStreamEncoder::EncoderRateSettings::operator==(
+ const EncoderRateSettings& rhs) const {
+ return rate_control == rhs.rate_control &&
+ encoder_target == rhs.encoder_target &&
+ stable_encoder_target == rhs.stable_encoder_target;
+}
+
+bool VideoStreamEncoder::EncoderRateSettings::operator!=(
+ const EncoderRateSettings& rhs) const {
+ return !(*this == rhs);
+}
+
+class VideoStreamEncoder::DegradationPreferenceManager
+ : public DegradationPreferenceProvider {
+ public:
+ explicit DegradationPreferenceManager(
+ VideoStreamAdapter* video_stream_adapter)
+ : degradation_preference_(DegradationPreference::DISABLED),
+ is_screenshare_(false),
+ effective_degradation_preference_(DegradationPreference::DISABLED),
+ video_stream_adapter_(video_stream_adapter) {
+ RTC_DCHECK(video_stream_adapter_);
+ sequence_checker_.Detach();
+ }
+
+ ~DegradationPreferenceManager() override = default;
+
+ DegradationPreference degradation_preference() const override {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return effective_degradation_preference_;
+ }
+
+ void SetDegradationPreference(DegradationPreference degradation_preference) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ degradation_preference_ = degradation_preference;
+ MaybeUpdateEffectiveDegradationPreference();
+ }
+
+ void SetIsScreenshare(bool is_screenshare) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ is_screenshare_ = is_screenshare;
+ MaybeUpdateEffectiveDegradationPreference();
+ }
+
+ private:
+ void MaybeUpdateEffectiveDegradationPreference()
+ RTC_RUN_ON(&sequence_checker_) {
+ DegradationPreference effective_degradation_preference =
+ (is_screenshare_ &&
+ degradation_preference_ == DegradationPreference::BALANCED)
+ ? DegradationPreference::MAINTAIN_RESOLUTION
+ : degradation_preference_;
+
+ if (effective_degradation_preference != effective_degradation_preference_) {
+ effective_degradation_preference_ = effective_degradation_preference;
+ video_stream_adapter_->SetDegradationPreference(
+ effective_degradation_preference);
+ }
+ }
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+ DegradationPreference degradation_preference_
+ RTC_GUARDED_BY(&sequence_checker_);
+ bool is_screenshare_ RTC_GUARDED_BY(&sequence_checker_);
+ DegradationPreference effective_degradation_preference_
+ RTC_GUARDED_BY(&sequence_checker_);
+ VideoStreamAdapter* video_stream_adapter_ RTC_GUARDED_BY(&sequence_checker_);
+};
+
+VideoStreamEncoder::VideoStreamEncoder(
+ Clock* clock,
+ uint32_t number_of_cores,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ const VideoStreamEncoderSettings& settings,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector,
+ std::unique_ptr<FrameCadenceAdapterInterface> frame_cadence_adapter,
+ std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter>
+ encoder_queue,
+ BitrateAllocationCallbackType allocation_cb_type,
+ const FieldTrialsView& field_trials,
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector)
+ : field_trials_(field_trials),
+ worker_queue_(TaskQueueBase::Current()),
+ number_of_cores_(number_of_cores),
+ sink_(nullptr),
+ settings_(settings),
+ allocation_cb_type_(allocation_cb_type),
+ rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
+ encoder_selector_from_constructor_(encoder_selector),
+ encoder_selector_from_factory_(
+ encoder_selector_from_constructor_
+ ? nullptr
+ : settings.encoder_factory->GetEncoderSelector()),
+ encoder_selector_(encoder_selector_from_constructor_
+ ? encoder_selector_from_constructor_
+ : encoder_selector_from_factory_.get()),
+ encoder_stats_observer_(encoder_stats_observer),
+ cadence_callback_(*this),
+ frame_cadence_adapter_(std::move(frame_cadence_adapter)),
+ encoder_initialized_(false),
+ max_framerate_(-1),
+ pending_encoder_reconfiguration_(false),
+ pending_encoder_creation_(false),
+ crop_width_(0),
+ crop_height_(0),
+ encoder_target_bitrate_bps_(absl::nullopt),
+ max_data_payload_length_(0),
+ encoder_paused_and_dropped_frame_(false),
+ was_encode_called_since_last_initialization_(false),
+ encoder_failed_(false),
+ clock_(clock),
+ last_captured_timestamp_(0),
+ delta_ntp_internal_ms_(clock_->CurrentNtpInMilliseconds() -
+ clock_->TimeInMilliseconds()),
+ last_frame_log_ms_(clock_->TimeInMilliseconds()),
+ captured_frame_count_(0),
+ dropped_frame_cwnd_pushback_count_(0),
+ dropped_frame_encoder_block_count_(0),
+ pending_frame_post_time_us_(0),
+ accumulated_update_rect_{0, 0, 0, 0},
+ accumulated_update_rect_is_valid_(true),
+ animation_start_time_(Timestamp::PlusInfinity()),
+ cap_resolution_due_to_video_content_(false),
+ expect_resize_state_(ExpectResizeState::kNoResize),
+ fec_controller_override_(nullptr),
+ force_disable_frame_dropper_(false),
+ pending_frame_drops_(0),
+ cwnd_frame_counter_(0),
+ next_frame_types_(1, VideoFrameType::kVideoFrameDelta),
+ frame_encode_metadata_writer_(this),
+ experiment_groups_(GetExperimentGroups()),
+ automatic_animation_detection_experiment_(
+ ParseAutomatincAnimationDetectionFieldTrial()),
+ input_state_provider_(encoder_stats_observer),
+ video_stream_adapter_(
+ std::make_unique<VideoStreamAdapter>(&input_state_provider_,
+ encoder_stats_observer,
+ field_trials)),
+ degradation_preference_manager_(
+ std::make_unique<DegradationPreferenceManager>(
+ video_stream_adapter_.get())),
+ adaptation_constraints_(),
+ stream_resource_manager_(&input_state_provider_,
+ encoder_stats_observer,
+ clock_,
+ settings_.experiment_cpu_load_estimator,
+ std::move(overuse_detector),
+ degradation_preference_manager_.get(),
+ field_trials),
+ video_source_sink_controller_(/*sink=*/frame_cadence_adapter_.get(),
+ /*source=*/nullptr),
+ default_limits_allowed_(
+ !field_trials.IsEnabled("WebRTC-DefaultBitrateLimitsKillSwitch")),
+ qp_parsing_allowed_(
+ !field_trials.IsEnabled("WebRTC-QpParsingKillSwitch")),
+ switch_encoder_on_init_failures_(!field_trials.IsDisabled(
+ kSwitchEncoderOnInitializationFailuresFieldTrial)),
+ vp9_low_tier_core_threshold_(
+ ParseVp9LowTierCoreCountThreshold(field_trials)),
+ encoder_queue_(std::move(encoder_queue)) {
+ TRACE_EVENT0("webrtc", "VideoStreamEncoder::VideoStreamEncoder");
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(encoder_stats_observer);
+ RTC_DCHECK_GE(number_of_cores, 1);
+
+ frame_cadence_adapter_->Initialize(&cadence_callback_);
+ stream_resource_manager_.Initialize(encoder_queue_.Get());
+
+ encoder_queue_.PostTask([this] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ resource_adaptation_processor_ =
+ std::make_unique<ResourceAdaptationProcessor>(
+ video_stream_adapter_.get());
+
+ stream_resource_manager_.SetAdaptationProcessor(
+ resource_adaptation_processor_.get(), video_stream_adapter_.get());
+ resource_adaptation_processor_->AddResourceLimitationsListener(
+ &stream_resource_manager_);
+ video_stream_adapter_->AddRestrictionsListener(&stream_resource_manager_);
+ video_stream_adapter_->AddRestrictionsListener(this);
+ stream_resource_manager_.MaybeInitializePixelLimitResource();
+
+ // Add the stream resource manager's resources to the processor.
+ adaptation_constraints_ = stream_resource_manager_.AdaptationConstraints();
+ for (auto* constraint : adaptation_constraints_) {
+ video_stream_adapter_->AddAdaptationConstraint(constraint);
+ }
+ });
+}
+
+VideoStreamEncoder::~VideoStreamEncoder() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(!video_source_sink_controller_.HasSource())
+ << "Must call ::Stop() before destruction.";
+}
+
+void VideoStreamEncoder::Stop() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ video_source_sink_controller_.SetSource(nullptr);
+
+ rtc::Event shutdown_event;
+ absl::Cleanup shutdown = [&shutdown_event] { shutdown_event.Set(); };
+ encoder_queue_.PostTask(
+ [this, shutdown = std::move(shutdown)] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (resource_adaptation_processor_) {
+ stream_resource_manager_.StopManagedResources();
+ for (auto* constraint : adaptation_constraints_) {
+ video_stream_adapter_->RemoveAdaptationConstraint(constraint);
+ }
+ for (auto& resource : additional_resources_) {
+ stream_resource_manager_.RemoveResource(resource);
+ }
+ additional_resources_.clear();
+ video_stream_adapter_->RemoveRestrictionsListener(this);
+ video_stream_adapter_->RemoveRestrictionsListener(
+ &stream_resource_manager_);
+ resource_adaptation_processor_->RemoveResourceLimitationsListener(
+ &stream_resource_manager_);
+ stream_resource_manager_.SetAdaptationProcessor(nullptr, nullptr);
+ resource_adaptation_processor_.reset();
+ }
+ rate_allocator_ = nullptr;
+ ReleaseEncoder();
+ encoder_ = nullptr;
+ frame_cadence_adapter_ = nullptr;
+ });
+ shutdown_event.Wait(rtc::Event::kForever);
+}
+
+void VideoStreamEncoder::SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) {
+ encoder_queue_.PostTask([this, fec_controller_override] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(!fec_controller_override_);
+ fec_controller_override_ = fec_controller_override;
+ if (encoder_) {
+ encoder_->SetFecControllerOverride(fec_controller_override_);
+ }
+ });
+}
+
+void VideoStreamEncoder::AddAdaptationResource(
+ rtc::scoped_refptr<Resource> resource) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ TRACE_EVENT0("webrtc", "VideoStreamEncoder::AddAdaptationResource");
+ // Map any externally added resources as kCpu for the sake of stats reporting.
+ // TODO(hbos): Make the manager map any unknown resources to kCpu and get rid
+ // of this MapResourceToReason() call.
+ TRACE_EVENT_ASYNC_BEGIN0(
+ "webrtc", "VideoStreamEncoder::AddAdaptationResource(latency)", this);
+ encoder_queue_.PostTask([this, resource = std::move(resource)] {
+ TRACE_EVENT_ASYNC_END0(
+ "webrtc", "VideoStreamEncoder::AddAdaptationResource(latency)", this);
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ additional_resources_.push_back(resource);
+ stream_resource_manager_.AddResource(resource, VideoAdaptationReason::kCpu);
+ });
+}
+
+std::vector<rtc::scoped_refptr<Resource>>
+VideoStreamEncoder::GetAdaptationResources() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+  // In practice, this method is only called by tests to verify operations that
+  // run on the encoder queue. So rather than forcing each call site to pair a
+  // PostTask() with an event and a Wait(), we do the PostTask() + Wait() dance
+  // once here.
+ rtc::Event event;
+ std::vector<rtc::scoped_refptr<Resource>> resources;
+ encoder_queue_.PostTask([&] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ resources = resource_adaptation_processor_->GetResources();
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+ return resources;
+}
+
+void VideoStreamEncoder::SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const DegradationPreference& degradation_preference) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ video_source_sink_controller_.SetSource(source);
+ input_state_provider_.OnHasInputChanged(source);
+
+ // This may trigger reconfiguring the QualityScaler on the encoder queue.
+ encoder_queue_.PostTask([this, degradation_preference] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ degradation_preference_manager_->SetDegradationPreference(
+ degradation_preference);
+ stream_resource_manager_.SetDegradationPreferences(degradation_preference);
+ if (encoder_) {
+ stream_resource_manager_.ConfigureQualityScaler(
+ encoder_->GetEncoderInfo());
+ stream_resource_manager_.ConfigureBandwidthQualityScaler(
+ encoder_->GetEncoderInfo());
+ }
+ });
+}
+
+void VideoStreamEncoder::SetSink(EncoderSink* sink, bool rotation_applied) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ video_source_sink_controller_.SetRotationApplied(rotation_applied);
+ video_source_sink_controller_.PushSourceSinkSettings();
+
+ encoder_queue_.PostTask([this, sink] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ sink_ = sink;
+ });
+}
+
+void VideoStreamEncoder::SetStartBitrate(int start_bitrate_bps) {
+ encoder_queue_.PostTask([this, start_bitrate_bps] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_LOG(LS_INFO) << "SetStartBitrate " << start_bitrate_bps;
+ encoder_target_bitrate_bps_ =
+ start_bitrate_bps != 0 ? absl::optional<uint32_t>(start_bitrate_bps)
+ : absl::nullopt;
+ stream_resource_manager_.SetStartBitrate(
+ DataRate::BitsPerSec(start_bitrate_bps));
+ });
+}
+
+void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length) {
+ ConfigureEncoder(std::move(config), max_data_payload_length, nullptr);
+}
+
+void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ SetParametersCallback callback) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ encoder_queue_.PostTask(
+ [this, config = std::move(config), max_data_payload_length,
+ callback = std::move(callback)]() mutable {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(sink_);
+ RTC_LOG(LS_INFO) << "ConfigureEncoder requested.";
+
+        // Set up the frame cadence adapter according to whether we're going
+        // to do screencast. The final number of spatial layers is based on
+        // info in `send_codec_`, which is computed from incoming frame
+        // dimensions and hence can only be determined later.
+        //
+        // Note: zero-hertz mode isn't enabled by this alone. Constraints also
+        // have to be set up with min_fps = 0 and max_fps > 0 (see the sketch
+        // after this block).
+ if (config.content_type == VideoEncoderConfig::ContentType::kScreen) {
+ frame_cadence_adapter_->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{});
+ } else {
+ frame_cadence_adapter_->SetZeroHertzModeEnabled(absl::nullopt);
+ }
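+
+        // A sketch of the constraint side, assuming the source reports
+        // webrtc::VideoTrackSourceConstraints (illustrative values):
+        //   constraints.min_fps = 0;   // lets the stream go fully idle
+        //   constraints.max_fps = 30;  // upper bound on the repeat rate
+        // Only together with such constraints does zero-hertz mode engage.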
+
+ pending_encoder_creation_ =
+ (!encoder_ || encoder_config_.video_format != config.video_format ||
+ max_data_payload_length_ != max_data_payload_length);
+ encoder_config_ = std::move(config);
+ max_data_payload_length_ = max_data_payload_length;
+ pending_encoder_reconfiguration_ = true;
+
+ // Reconfigure the encoder now if the frame resolution is known.
+ // Otherwise, the reconfiguration is deferred until the next frame to
+ // minimize the number of reconfigurations. The codec configuration
+ // depends on incoming video frame size.
+ if (last_frame_info_) {
+ if (callback) {
+ encoder_configuration_callbacks_.push_back(std::move(callback));
+ }
+
+ ReconfigureEncoder();
+ } else {
+ webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+ }
+ });
+}
+
+// We should reduce the number of 'full' ReconfigureEncoder() calls. If only a
+// subset of it is needed at runtime, consider handling it in
+// VideoStreamEncoder::EncodeVideoFrame() when encoder_info_ != info.
+void VideoStreamEncoder::ReconfigureEncoder() {
+ // Running on the encoder queue.
+ RTC_DCHECK(pending_encoder_reconfiguration_);
+
+ bool encoder_reset_required = false;
+ if (pending_encoder_creation_) {
+    // Destroy the existing encoder instance before creating a new one.
+    // Otherwise, the attempt to create another instance will fail if the
+    // encoder factory supports only a single instance of an encoder of a
+    // given type.
+ encoder_.reset();
+
+ encoder_ = settings_.encoder_factory->CreateVideoEncoder(
+ encoder_config_.video_format);
+ if (!encoder_) {
+ RTC_LOG(LS_ERROR) << "CreateVideoEncoder failed, failing encoder format: "
+ << encoder_config_.video_format.ToString();
+ RequestEncoderSwitch();
+ return;
+ }
+
+ if (encoder_selector_) {
+ encoder_selector_->OnCurrentEncoder(encoder_config_.video_format);
+ }
+
+ encoder_->SetFecControllerOverride(fec_controller_override_);
+
+ encoder_reset_required = true;
+ }
+
+  // TODO(webrtc:14451): Move AlignmentAdjuster into EncoderStreamFactory.
+  // Possibly adjusts scale_resolution_down_by in `encoder_config_` to limit
+  // the alignment value.
+ AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ encoder_->GetEncoderInfo(), &encoder_config_, absl::nullopt);
+
+ std::vector<VideoStream> streams;
+ if (encoder_config_.video_stream_factory) {
+ // Note: only tests set their own EncoderStreamFactory...
+ streams = encoder_config_.video_stream_factory->CreateEncoderStreams(
+ last_frame_info_->width, last_frame_info_->height, encoder_config_);
+ } else {
+ rtc::scoped_refptr<VideoEncoderConfig::VideoStreamFactoryInterface>
+ factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ encoder_config_.video_format.name, encoder_config_.max_qp,
+ encoder_config_.content_type ==
+ webrtc::VideoEncoderConfig::ContentType::kScreen,
+ encoder_config_.legacy_conference_mode, encoder_->GetEncoderInfo(),
+ MergeRestrictions({latest_restrictions_, animate_restrictions_}),
+ &field_trials_);
+
+ streams = factory->CreateEncoderStreams(
+ last_frame_info_->width, last_frame_info_->height, encoder_config_);
+ }
+
+  // TODO(webrtc:14451): Move AlignmentAdjuster into EncoderStreamFactory.
+  // Get alignment when the actual number of layers is known.
+ int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ encoder_->GetEncoderInfo(), &encoder_config_, streams.size());
+
+ // Check that the higher layers do not try to set number of temporal layers
+ // to less than 1.
+ // TODO(brandtr): Get rid of the wrapping optional as it serves no purpose
+ // at this layer.
+#if RTC_DCHECK_IS_ON
+ for (const auto& stream : streams) {
+ RTC_DCHECK_GE(stream.num_temporal_layers.value_or(1), 1);
+ }
+#endif
+
+ // TODO(ilnik): If configured resolution is significantly less than provided,
+ // e.g. because there are not enough SSRCs for all simulcast streams,
+ // signal new resolutions via SinkWants to video source.
+
+  // Stream dimensions may not be equal to the given dimensions because of
+  // simulcast restrictions.
+ auto highest_stream = absl::c_max_element(
+ streams, [](const webrtc::VideoStream& a, const webrtc::VideoStream& b) {
+ return std::tie(a.width, a.height) < std::tie(b.width, b.height);
+ });
+ int highest_stream_width = static_cast<int>(highest_stream->width);
+ int highest_stream_height = static_cast<int>(highest_stream->height);
+  // Dimensions may be reduced to be, e.g., divisible by 4.
+ RTC_CHECK_GE(last_frame_info_->width, highest_stream_width);
+ RTC_CHECK_GE(last_frame_info_->height, highest_stream_height);
+ crop_width_ = last_frame_info_->width - highest_stream_width;
+ crop_height_ = last_frame_info_->height - highest_stream_height;
+
+ if (!encoder_->GetEncoderInfo().is_qp_trusted.value_or(true)) {
+    // When QP is not trusted, we prefer to use the
+    // |resolution_bitrate_limits| provided by the encoder.
+ const std::vector<VideoEncoder::ResolutionBitrateLimits>& bitrate_limits =
+ encoder_->GetEncoderInfo().resolution_bitrate_limits.empty()
+ ? EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted()
+ : encoder_->GetEncoderInfo().resolution_bitrate_limits;
+
+    // The BandwidthQualityScaler is implemented on the assumption that a
+    // given pixel_count corresponds to a certain bps interval. By default,
+    // WebRTC caps max_bps at 2500Kbps when width * height > 960 * 540. For
+    // example, assume:
+    // 1. The camera supports 1080p.
+    // 2. ResolutionBitrateLimits sets the 720p bps interval to
+    //    [1500Kbps, 2000Kbps].
+    // 3. ResolutionBitrateLimits sets the 1080p bps interval to
+    //    [2000Kbps, 2500Kbps].
+    // We would never be stable at 720p, because the actual encoding bps of
+    // both 720p and 1080p is 2500Kbps. So it is necessary to linearly
+    // interpolate a bitrate for a given pixel_count. The same applies to
+    // 960*540 and 640*520: we would never be stable at 640*520 because their
+    // |target_bitrate_bps| are both 2000Kbps. (An illustrative interpolation
+    // sketch follows ReconfigureEncoder() below.)
+ absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ qp_untrusted_bitrate_limit = EncoderInfoSettings::
+ GetSinglecastBitrateLimitForResolutionWhenQpIsUntrusted(
+ last_frame_info_->width * last_frame_info_->height,
+ bitrate_limits);
+
+ if (qp_untrusted_bitrate_limit) {
+ // bandwidth_quality_scaler is only used for singlecast.
+ if (streams.size() == 1 && encoder_config_.simulcast_layers.size() == 1) {
+ streams.back().min_bitrate_bps =
+ qp_untrusted_bitrate_limit->min_bitrate_bps;
+ streams.back().max_bitrate_bps =
+ qp_untrusted_bitrate_limit->max_bitrate_bps;
+          // In screen share mode, the minimum value of max_bitrate should
+          // be greater than or equal to 1200kbps.
+ if (encoder_config_.content_type ==
+ VideoEncoderConfig::ContentType::kScreen) {
+ streams.back().max_bitrate_bps = std::max(
+ streams.back().max_bitrate_bps, kDefaultMinScreenSharebps);
+ }
+ streams.back().target_bitrate_bps =
+ qp_untrusted_bitrate_limit->max_bitrate_bps;
+ }
+ }
+ } else {
+ absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ encoder_bitrate_limits =
+ encoder_->GetEncoderInfo().GetEncoderBitrateLimitsForResolution(
+ last_frame_info_->width * last_frame_info_->height);
+
+ if (encoder_bitrate_limits) {
+ if (streams.size() == 1 && encoder_config_.simulcast_layers.size() == 1) {
+          // Bitrate limits can be set by the app (in SDP or
+          // RtpEncodingParameters) and/or can be provided by the encoder. If
+          // both sets of limits are present, the final set is derived as
+          // their intersection.
+ int min_bitrate_bps;
+ if (encoder_config_.simulcast_layers.empty() ||
+ encoder_config_.simulcast_layers[0].min_bitrate_bps <= 0) {
+ min_bitrate_bps = encoder_bitrate_limits->min_bitrate_bps;
+ } else {
+ min_bitrate_bps = std::max(encoder_bitrate_limits->min_bitrate_bps,
+ streams.back().min_bitrate_bps);
+ }
+
+ int max_bitrate_bps;
+ // We don't check encoder_config_.simulcast_layers[0].max_bitrate_bps
+ // here since encoder_config_.max_bitrate_bps is derived from it (as
+ // well as from other inputs).
+ if (encoder_config_.max_bitrate_bps <= 0) {
+ max_bitrate_bps = encoder_bitrate_limits->max_bitrate_bps;
+ } else {
+ max_bitrate_bps = std::min(encoder_bitrate_limits->max_bitrate_bps,
+ streams.back().max_bitrate_bps);
+ }
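+          // Worked example with hypothetical numbers (not from the source):
+          // if the app sets [300, 2000] kbps and the encoder reports
+          // [500, 1500] kbps, the intersection is
+          // [max(500, 300), min(1500, 2000)] = [500, 1500] kbps.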
+
+ if (min_bitrate_bps < max_bitrate_bps) {
+ streams.back().min_bitrate_bps = min_bitrate_bps;
+ streams.back().max_bitrate_bps = max_bitrate_bps;
+ streams.back().target_bitrate_bps =
+ std::min(streams.back().target_bitrate_bps,
+ encoder_bitrate_limits->max_bitrate_bps);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Bitrate limits provided by encoder"
+ << " (min=" << encoder_bitrate_limits->min_bitrate_bps
+ << ", max=" << encoder_bitrate_limits->max_bitrate_bps
+ << ") do not intersect with limits set by app"
+ << " (min=" << streams.back().min_bitrate_bps
+ << ", max=" << encoder_config_.max_bitrate_bps
+ << "). The app bitrate limits will be used.";
+ }
+ }
+ }
+ }
+
+ ApplyEncoderBitrateLimitsIfSingleActiveStream(
+ GetEncoderInfoWithBitrateLimitUpdate(
+ encoder_->GetEncoderInfo(), encoder_config_, default_limits_allowed_),
+ encoder_config_.simulcast_layers, &streams);
+
+ VideoCodec codec;
+ if (!VideoCodecInitializer::SetupCodec(encoder_config_, streams, &codec)) {
+ RTC_LOG(LS_ERROR) << "Failed to create encoder configuration.";
+ }
+
+ if (encoder_config_.codec_type == kVideoCodecVP9) {
+ // Spatial layers configuration might impose some parity restrictions,
+ // thus some cropping might be needed.
+ crop_width_ = last_frame_info_->width - codec.width;
+ crop_height_ = last_frame_info_->height - codec.height;
+ ApplyVp9BitrateLimits(GetEncoderInfoWithBitrateLimitUpdate(
+ encoder_->GetEncoderInfo(), encoder_config_,
+ default_limits_allowed_),
+ encoder_config_, &codec);
+ }
+
+ char log_stream_buf[4 * 1024];
+ rtc::SimpleStringBuilder log_stream(log_stream_buf);
+ log_stream << "ReconfigureEncoder:\n";
+ log_stream << "Simulcast streams:\n";
+ for (size_t i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ log_stream << i << ": " << codec.simulcastStream[i].width << "x"
+ << codec.simulcastStream[i].height
+ << " min_kbps: " << codec.simulcastStream[i].minBitrate
+ << " target_kbps: " << codec.simulcastStream[i].targetBitrate
+ << " max_kbps: " << codec.simulcastStream[i].maxBitrate
+ << " max_fps: " << codec.simulcastStream[i].maxFramerate
+ << " max_qp: " << codec.simulcastStream[i].qpMax
+ << " num_tl: " << codec.simulcastStream[i].numberOfTemporalLayers
+ << " active: "
+ << (codec.simulcastStream[i].active ? "true" : "false") << "\n";
+ }
+ if (encoder_config_.codec_type == kVideoCodecVP9) {
+ size_t num_spatial_layers = codec.VP9()->numberOfSpatialLayers;
+ log_stream << "Spatial layers:\n";
+ for (size_t i = 0; i < num_spatial_layers; ++i) {
+ log_stream << i << ": " << codec.spatialLayers[i].width << "x"
+ << codec.spatialLayers[i].height
+ << " min_kbps: " << codec.spatialLayers[i].minBitrate
+ << " target_kbps: " << codec.spatialLayers[i].targetBitrate
+ << " max_kbps: " << codec.spatialLayers[i].maxBitrate
+ << " max_fps: " << codec.spatialLayers[i].maxFramerate
+ << " max_qp: " << codec.spatialLayers[i].qpMax
+ << " num_tl: " << codec.spatialLayers[i].numberOfTemporalLayers
+ << " active: "
+ << (codec.spatialLayers[i].active ? "true" : "false") << "\n";
+ }
+ }
+ RTC_LOG(LS_INFO) << log_stream.str();
+
+ codec.startBitrate = std::max(encoder_target_bitrate_bps_.value_or(0) / 1000,
+ codec.minBitrate);
+ codec.startBitrate = std::min(codec.startBitrate, codec.maxBitrate);
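+  // E.g. with encoder_target_bitrate_bps_ = 1'500'000, minBitrate = 30 and
+  // maxBitrate = 1000 (all codec fields in kbps), the start bitrate becomes
+  // min(max(1500, 30), 1000) = 1000 kbps.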
+ codec.expect_encode_from_texture = last_frame_info_->is_texture;
+  // Sanity check: the start bitrate (in kbps) should never exceed 1000000.
+ RTC_DCHECK_LE(codec.startBitrate, 1000000);
+ max_framerate_ = codec.maxFramerate;
+
+ // Inform source about max configured framerate,
+ // requested_resolution and which layers are active.
+ int max_framerate = 0;
+  // Whether any layer is active.
+ bool active = false;
+ // The max requested_resolution.
+ absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution;
+ for (const auto& stream : streams) {
+ max_framerate = std::max(stream.max_framerate, max_framerate);
+ active |= stream.active;
+    // Note: we propagate the highest requested_resolution regardless
+    // of whether the layer is active or not.
+ if (stream.requested_resolution) {
+ if (!requested_resolution) {
+ requested_resolution.emplace(stream.requested_resolution->width,
+ stream.requested_resolution->height);
+ } else {
+ requested_resolution.emplace(
+ std::max(stream.requested_resolution->width,
+ requested_resolution->width),
+ std::max(stream.requested_resolution->height,
+ requested_resolution->height));
+ }
+ }
+ }
+
+ // The resolutions that we're actually encoding with.
+ std::vector<rtc::VideoSinkWants::FrameSize> encoder_resolutions;
+ // TODO(hbos): For the case of SVC, also make use of `codec.spatialLayers`.
+ // For now, SVC layers are handled by the VP9 encoder.
+ for (const auto& simulcastStream : codec.simulcastStream) {
+ if (!simulcastStream.active)
+ continue;
+ encoder_resolutions.emplace_back(simulcastStream.width,
+ simulcastStream.height);
+ }
+
+ worker_queue_->PostTask(SafeTask(
+ task_safety_.flag(),
+ [this, max_framerate, alignment,
+ encoder_resolutions = std::move(encoder_resolutions),
+ requested_resolution = std::move(requested_resolution), active]() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ if (max_framerate !=
+ video_source_sink_controller_.frame_rate_upper_limit() ||
+ alignment != video_source_sink_controller_.resolution_alignment() ||
+ encoder_resolutions !=
+ video_source_sink_controller_.resolutions() ||
+ (video_source_sink_controller_.requested_resolution() !=
+ requested_resolution) ||
+ (video_source_sink_controller_.active() != active)) {
+ video_source_sink_controller_.SetFrameRateUpperLimit(max_framerate);
+ video_source_sink_controller_.SetResolutionAlignment(alignment);
+ video_source_sink_controller_.SetResolutions(
+ std::move(encoder_resolutions));
+ video_source_sink_controller_.SetRequestedResolution(
+ requested_resolution);
+ video_source_sink_controller_.SetActive(active);
+ video_source_sink_controller_.PushSourceSinkSettings();
+ }
+ }));
+
+ rate_allocator_ =
+ settings_.bitrate_allocator_factory->CreateVideoBitrateAllocator(codec);
+ rate_allocator_->SetLegacyConferenceMode(
+ encoder_config_.legacy_conference_mode);
+
+ // Reset (release existing encoder) if one exists and anything except
+ // start bitrate or max framerate has changed.
+ if (!encoder_reset_required) {
+ encoder_reset_required = RequiresEncoderReset(
+ send_codec_, codec, was_encode_called_since_last_initialization_);
+ }
+
+ if (codec.codecType == VideoCodecType::kVideoCodecVP9 &&
+ number_of_cores_ <= vp9_low_tier_core_threshold_.value_or(0)) {
+ codec.SetVideoEncoderComplexity(VideoCodecComplexity::kComplexityLow);
+ }
+
+ send_codec_ = codec;
+
+  // Keep the same encoder as long as the video_format is unchanged.
+  // The encoder creation block is split in two since the EncoderInfo needed
+  // to start CPU adaptation with the correct settings should be polled after
+  // encoder_->InitEncode().
+ if (encoder_reset_required) {
+ ReleaseEncoder();
+ const size_t max_data_payload_length = max_data_payload_length_ > 0
+ ? max_data_payload_length_
+ : kDefaultPayloadSize;
+ if (encoder_->InitEncode(
+ &send_codec_,
+ VideoEncoder::Settings(settings_.capabilities, number_of_cores_,
+ max_data_payload_length)) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
+ "codec type: "
+ << CodecTypeToPayloadString(send_codec_.codecType)
+ << " (" << send_codec_.codecType << ")";
+ ReleaseEncoder();
+ } else {
+ encoder_initialized_ = true;
+ encoder_->RegisterEncodeCompleteCallback(this);
+ frame_encode_metadata_writer_.OnEncoderInit(send_codec_);
+ next_frame_types_.clear();
+ next_frame_types_.resize(
+ std::max(static_cast<int>(codec.numberOfSimulcastStreams), 1),
+ VideoFrameType::kVideoFrameKey);
+ }
+
+ frame_encode_metadata_writer_.Reset();
+ last_encode_info_ms_ = absl::nullopt;
+ was_encode_called_since_last_initialization_ = false;
+ }
+
+ // Inform dependents of updated encoder settings.
+ OnEncoderSettingsChanged();
+
+ if (encoder_initialized_) {
+ RTC_LOG(LS_VERBOSE) << " max bitrate " << codec.maxBitrate
+ << " start bitrate " << codec.startBitrate
+ << " max frame rate " << codec.maxFramerate
+ << " max payload size " << max_data_payload_length_;
+ } else {
+ RTC_LOG(LS_ERROR) << "Failed to configure encoder.";
+ rate_allocator_ = nullptr;
+ }
+
+ if (pending_encoder_creation_) {
+ stream_resource_manager_.ConfigureEncodeUsageResource();
+ pending_encoder_creation_ = false;
+ }
+
+ int num_layers;
+ if (codec.codecType == kVideoCodecVP8) {
+ num_layers = codec.VP8()->numberOfTemporalLayers;
+ } else if (codec.codecType == kVideoCodecVP9) {
+ num_layers = codec.VP9()->numberOfTemporalLayers;
+ } else if (codec.codecType == kVideoCodecH264) {
+ num_layers = codec.H264()->numberOfTemporalLayers;
+ } else if (codec.codecType == kVideoCodecGeneric &&
+ codec.numberOfSimulcastStreams > 0) {
+    // This is mainly for unit testing, to disable frame dropping.
+ // TODO(sprang): Add a better way to disable frame dropping.
+ num_layers = codec.simulcastStream[0].numberOfTemporalLayers;
+ } else {
+ num_layers = 1;
+ }
+
+ frame_dropper_.Reset();
+ frame_dropper_.SetRates(codec.startBitrate, max_framerate_);
+ // Force-disable frame dropper if either:
+ // * We have screensharing with layers.
+ // * "WebRTC-FrameDropper" field trial is "Disabled".
+ force_disable_frame_dropper_ =
+ field_trials_.IsDisabled(kFrameDropperFieldTrial) ||
+ (num_layers > 1 && codec.mode == VideoCodecMode::kScreensharing);
+
+ VideoEncoder::EncoderInfo info = encoder_->GetEncoderInfo();
+ if (rate_control_settings_.UseEncoderBitrateAdjuster()) {
+ bitrate_adjuster_ = std::make_unique<EncoderBitrateAdjuster>(codec);
+ bitrate_adjuster_->OnEncoderInfo(info);
+ }
+
+ if (rate_allocator_ && last_encoder_rate_settings_) {
+ // We have a new rate allocator instance and already configured target
+ // bitrate. Update the rate allocation and notify observers.
+ // We must invalidate the last_encoder_rate_settings_ to ensure
+ // the changes get propagated to all listeners.
+ EncoderRateSettings rate_settings = *last_encoder_rate_settings_;
+ last_encoder_rate_settings_.reset();
+ rate_settings.rate_control.framerate_fps = GetInputFramerateFps();
+
+ SetEncoderRates(UpdateBitrateAllocation(rate_settings));
+ }
+
+ encoder_stats_observer_->OnEncoderReconfigured(encoder_config_, streams);
+
+ pending_encoder_reconfiguration_ = false;
+
+ bool is_svc = false;
+ // Set min_bitrate_bps, max_bitrate_bps, and max padding bit rate for VP9
+ // and leave only one stream containing all necessary information.
+ if (encoder_config_.codec_type == kVideoCodecVP9) {
+    // Lower the max bitrate to the level the codec can actually produce.
+ streams[0].max_bitrate_bps =
+ std::min(streams[0].max_bitrate_bps,
+ SvcRateAllocator::GetMaxBitrate(codec).bps<int>());
+ streams[0].min_bitrate_bps = codec.spatialLayers[0].minBitrate * 1000;
+ // target_bitrate_bps specifies the maximum padding bitrate.
+ streams[0].target_bitrate_bps =
+ SvcRateAllocator::GetPaddingBitrate(codec).bps<int>();
+ streams[0].width = streams.back().width;
+ streams[0].height = streams.back().height;
+ is_svc = codec.VP9()->numberOfSpatialLayers > 1;
+ streams.resize(1);
+ }
+
+ sink_->OnEncoderConfigurationChanged(
+ std::move(streams), is_svc, encoder_config_.content_type,
+ encoder_config_.min_transmit_bitrate_bps);
+
+ stream_resource_manager_.ConfigureQualityScaler(info);
+ stream_resource_manager_.ConfigureBandwidthQualityScaler(info);
+
+ webrtc::RTCError encoder_configuration_result = webrtc::RTCError::OK();
+
+ if (!encoder_initialized_) {
+ RTC_LOG(LS_WARNING) << "Failed to initialize "
+ << CodecTypeToPayloadString(codec.codecType)
+                        << " encoder. "
+                        << "switch_encoder_on_init_failures: "
+ << switch_encoder_on_init_failures_;
+
+ if (switch_encoder_on_init_failures_) {
+ RequestEncoderSwitch();
+ } else {
+ encoder_configuration_result =
+ webrtc::RTCError(RTCErrorType::UNSUPPORTED_OPERATION);
+ }
+ }
+
+ if (!encoder_configuration_callbacks_.empty()) {
+ for (auto& callback : encoder_configuration_callbacks_) {
+ webrtc::InvokeSetParametersCallback(callback,
+ encoder_configuration_result);
+ }
+ encoder_configuration_callbacks_.clear();
+ }
+}
+
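+// Illustrative sketch, not part of the upstream change: a minimal linear
+// interpolation of a max-bitrate limit between two resolution rungs, in the
+// spirit of the QP-untrusted bitrate-limit handling in ReconfigureEncoder()
+// above. The function name, parameters, and numbers are all hypothetical.
+namespace {
+[[maybe_unused]] int InterpolateMaxBitrateBps(int pixels,
+                                              int low_pixels,
+                                              int low_max_bps,
+                                              int high_pixels,
+                                              int high_max_bps) {
+  // Clamp to the known limits outside the [low_pixels, high_pixels] range.
+  if (pixels <= low_pixels)
+    return low_max_bps;
+  if (pixels >= high_pixels)
+    return high_max_bps;
+  const double t = static_cast<double>(pixels - low_pixels) /
+                   (high_pixels - low_pixels);
+  // E.g. 1280x720 between 960x540 -> 2000 kbps and 1920x1080 -> 2500 kbps
+  // yields roughly 2130 kbps, instead of both rungs saturating at one cap.
+  return static_cast<int>(low_max_bps + t * (high_max_bps - low_max_bps));
+}
+}  // namespace
+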
+void VideoStreamEncoder::RequestEncoderSwitch() {
+ bool is_encoder_switching_supported =
+ settings_.encoder_switch_request_callback != nullptr;
+ bool is_encoder_selector_available = encoder_selector_ != nullptr;
+
+ RTC_LOG(LS_INFO) << "RequestEncoderSwitch."
+ << " is_encoder_selector_available: "
+ << is_encoder_selector_available
+ << " is_encoder_switching_supported: "
+ << is_encoder_switching_supported;
+
+ if (!is_encoder_switching_supported) {
+ return;
+ }
+
+ // If encoder selector is available, switch to the encoder it prefers.
+ // Otherwise try switching to VP8 (default WebRTC codec).
+ absl::optional<SdpVideoFormat> preferred_fallback_encoder;
+ if (is_encoder_selector_available) {
+ preferred_fallback_encoder = encoder_selector_->OnEncoderBroken();
+ }
+
+ if (!preferred_fallback_encoder) {
+ preferred_fallback_encoder =
+ SdpVideoFormat(CodecTypeToPayloadString(kVideoCodecVP8));
+ }
+
+ settings_.encoder_switch_request_callback->RequestEncoderSwitch(
+ *preferred_fallback_encoder, /*allow_default_fallback=*/true);
+}
+
+void VideoStreamEncoder::OnEncoderSettingsChanged() {
+ EncoderSettings encoder_settings(
+ GetEncoderInfoWithBitrateLimitUpdate(
+ encoder_->GetEncoderInfo(), encoder_config_, default_limits_allowed_),
+ encoder_config_.Copy(), send_codec_);
+ stream_resource_manager_.SetEncoderSettings(encoder_settings);
+ input_state_provider_.OnEncoderSettingsChanged(encoder_settings);
+ bool is_screenshare = encoder_settings.encoder_config().content_type ==
+ VideoEncoderConfig::ContentType::kScreen;
+ degradation_preference_manager_->SetIsScreenshare(is_screenshare);
+ if (is_screenshare) {
+ frame_cadence_adapter_->SetZeroHertzModeEnabled(
+ FrameCadenceAdapterInterface::ZeroHertzModeParams{
+ send_codec_.numberOfSimulcastStreams});
+ }
+}
+
+void VideoStreamEncoder::OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& video_frame) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ VideoFrame incoming_frame = video_frame;
+
+  // In some cases, e.g. when a frame from the decoder is fed to the encoder,
+  // the timestamp may be set in the future. As the encoding pipeline assumes
+  // capture time to be less than the present time, we reset the capture
+  // timestamp here. Otherwise there may be issues with the RTP send stream.
+ if (incoming_frame.timestamp_us() > post_time.us())
+ incoming_frame.set_timestamp_us(post_time.us());
+
+  // Capture time may come from a clock with an offset and drift relative to
+  // clock_.
+ int64_t capture_ntp_time_ms;
+ if (video_frame.ntp_time_ms() > 0) {
+ capture_ntp_time_ms = video_frame.ntp_time_ms();
+ } else if (video_frame.render_time_ms() != 0) {
+ capture_ntp_time_ms = video_frame.render_time_ms() + delta_ntp_internal_ms_;
+ } else {
+ capture_ntp_time_ms = post_time.ms() + delta_ntp_internal_ms_;
+ }
+ incoming_frame.set_ntp_time_ms(capture_ntp_time_ms);
+
+ // Convert NTP time, in ms, to RTP timestamp.
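+  // (The 90 kHz RTP video clock advances 90 ticks per millisecond, so an NTP
+  // time of e.g. 1000 ms maps to RTP timestamp 90000.)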
+ const int kMsToRtpTimestamp = 90;
+ incoming_frame.set_timestamp(
+ kMsToRtpTimestamp * static_cast<uint32_t>(incoming_frame.ntp_time_ms()));
+
+ if (incoming_frame.ntp_time_ms() <= last_captured_timestamp_) {
+    // We don't allow the same capture time for two frames; drop this one.
+ RTC_LOG(LS_WARNING) << "Same/old NTP timestamp ("
+ << incoming_frame.ntp_time_ms()
+ << " <= " << last_captured_timestamp_
+ << ") for incoming frame. Dropping.";
+ encoder_queue_.PostTask([this, incoming_frame]() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ accumulated_update_rect_.Union(incoming_frame.update_rect());
+ accumulated_update_rect_is_valid_ &= incoming_frame.has_update_rect();
+ });
+ return;
+ }
+
+ bool log_stats = false;
+ if (post_time.ms() - last_frame_log_ms_ > kFrameLogIntervalMs) {
+ last_frame_log_ms_ = post_time.ms();
+ log_stats = true;
+ }
+
+ last_captured_timestamp_ = incoming_frame.ntp_time_ms();
+
+ encoder_stats_observer_->OnIncomingFrame(incoming_frame.width(),
+ incoming_frame.height());
+ ++captured_frame_count_;
+ CheckForAnimatedContent(incoming_frame, post_time.us());
+ bool cwnd_frame_drop =
+ cwnd_frame_drop_interval_ &&
+ (cwnd_frame_counter_++ % cwnd_frame_drop_interval_.value() == 0);
+ if (frames_scheduled_for_processing == 1 && !cwnd_frame_drop) {
+ MaybeEncodeVideoFrame(incoming_frame, post_time.us());
+ } else {
+ if (cwnd_frame_drop) {
+ // Frame drop by congestion window pushback. Do not encode this
+ // frame.
+ ++dropped_frame_cwnd_pushback_count_;
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kCongestionWindow);
+ } else {
+ // There is a newer frame in flight. Do not encode this frame.
+ RTC_LOG(LS_VERBOSE)
+          << "Incoming frame dropped because the encoder is blocked.";
+ ++dropped_frame_encoder_block_count_;
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kEncoderQueue);
+ }
+ accumulated_update_rect_.Union(incoming_frame.update_rect());
+ accumulated_update_rect_is_valid_ &= incoming_frame.has_update_rect();
+ }
+ if (log_stats) {
+ RTC_LOG(LS_INFO) << "Number of frames: captured " << captured_frame_count_
+ << ", dropped (due to congestion window pushback) "
+ << dropped_frame_cwnd_pushback_count_
+ << ", dropped (due to encoder blocked) "
+ << dropped_frame_encoder_block_count_ << ", interval_ms "
+ << kFrameLogIntervalMs;
+ captured_frame_count_ = 0;
+ dropped_frame_cwnd_pushback_count_ = 0;
+ dropped_frame_encoder_block_count_ = 0;
+ }
+}
+
+void VideoStreamEncoder::OnDiscardedFrame() {
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kSource);
+}
+
+bool VideoStreamEncoder::EncoderPaused() const {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ // Pause video if paused by caller or as long as the network is down or the
+ // pacer queue has grown too large in buffered mode.
+ // If the pacer queue has grown too large or the network is down,
+ // `last_encoder_rate_settings_->encoder_target` will be 0.
+ return !last_encoder_rate_settings_ ||
+ last_encoder_rate_settings_->encoder_target == DataRate::Zero();
+}
+
+void VideoStreamEncoder::TraceFrameDropStart() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ // Start trace event only on the first frame after encoder is paused.
+ if (!encoder_paused_and_dropped_frame_) {
+ TRACE_EVENT_ASYNC_BEGIN0("webrtc", "EncoderPaused", this);
+ }
+ encoder_paused_and_dropped_frame_ = true;
+}
+
+void VideoStreamEncoder::TraceFrameDropEnd() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ // End trace event on first frame after encoder resumes, if frame was dropped.
+ if (encoder_paused_and_dropped_frame_) {
+ TRACE_EVENT_ASYNC_END0("webrtc", "EncoderPaused", this);
+ }
+ encoder_paused_and_dropped_frame_ = false;
+}
+
+VideoStreamEncoder::EncoderRateSettings
+VideoStreamEncoder::UpdateBitrateAllocation(
+ const EncoderRateSettings& rate_settings) {
+ VideoBitrateAllocation new_allocation;
+  // Only call allocators if bitrate > 0 (i.e., not suspended); otherwise they
+  // might cap the bitrate to the configured min bitrate.
+ if (rate_allocator_ && rate_settings.encoder_target > DataRate::Zero()) {
+ new_allocation = rate_allocator_->Allocate(VideoBitrateAllocationParameters(
+ rate_settings.encoder_target, rate_settings.stable_encoder_target,
+ rate_settings.rate_control.framerate_fps));
+ }
+
+ EncoderRateSettings new_rate_settings = rate_settings;
+ new_rate_settings.rate_control.target_bitrate = new_allocation;
+ new_rate_settings.rate_control.bitrate = new_allocation;
+  // VideoBitrateAllocator subclasses may allocate a bitrate higher than the
+  // target in order to sustain the min bitrate of the video codec. In this
+  // case, make sure the bandwidth allocation is at least equal to the
+  // allocation, as that is part of the documented contract for that field.
+ new_rate_settings.rate_control.bandwidth_allocation =
+ std::max(new_rate_settings.rate_control.bandwidth_allocation,
+ DataRate::BitsPerSec(
+ new_rate_settings.rate_control.bitrate.get_sum_bps()));
+
+ if (bitrate_adjuster_) {
+ VideoBitrateAllocation adjusted_allocation =
+ bitrate_adjuster_->AdjustRateAllocation(new_rate_settings.rate_control);
+ RTC_LOG(LS_VERBOSE) << "Adjusting allocation, fps = "
+ << rate_settings.rate_control.framerate_fps << ", from "
+ << new_allocation.ToString() << ", to "
+ << adjusted_allocation.ToString();
+ new_rate_settings.rate_control.bitrate = adjusted_allocation;
+ }
+
+ return new_rate_settings;
+}
+
+uint32_t VideoStreamEncoder::GetInputFramerateFps() {
+ const uint32_t default_fps = max_framerate_ != -1 ? max_framerate_ : 30;
+
+  // This method may be called after we cleared out the frame_cadence_adapter_
+  // reference in Stop(). In such a situation, getting a decent estimate is
+  // probably not important.
+ absl::optional<uint32_t> input_fps =
+ frame_cadence_adapter_ ? frame_cadence_adapter_->GetInputFrameRateFps()
+ : absl::nullopt;
+ if (!input_fps || *input_fps == 0) {
+ return default_fps;
+ }
+ return *input_fps;
+}
+
+void VideoStreamEncoder::SetEncoderRates(
+ const EncoderRateSettings& rate_settings) {
+ RTC_DCHECK_GT(rate_settings.rate_control.framerate_fps, 0.0);
+ bool rate_control_changed =
+ (!last_encoder_rate_settings_.has_value() ||
+ last_encoder_rate_settings_->rate_control != rate_settings.rate_control);
+  // For the layer allocation signal we only care about the target bitrate
+  // (not the adjusted one) and the target fps.
+ bool layer_allocation_changed =
+ !last_encoder_rate_settings_.has_value() ||
+ last_encoder_rate_settings_->rate_control.target_bitrate !=
+ rate_settings.rate_control.target_bitrate ||
+ last_encoder_rate_settings_->rate_control.framerate_fps !=
+ rate_settings.rate_control.framerate_fps;
+
+ if (last_encoder_rate_settings_ != rate_settings) {
+ last_encoder_rate_settings_ = rate_settings;
+ }
+
+ if (!encoder_)
+ return;
+
+  // Let the cadence adapter know if streams were disabled.
+ for (int spatial_index = 0;
+ spatial_index != send_codec_.numberOfSimulcastStreams; ++spatial_index) {
+ frame_cadence_adapter_->UpdateLayerStatus(
+ spatial_index,
+ /*enabled=*/rate_settings.rate_control.target_bitrate
+ .GetSpatialLayerSum(spatial_index) > 0);
+ }
+
+  // If `bitrate_allocation` is 0 it means that the network is down or the
+  // send pacer is full. We currently don't pass this on to the encoder since
+  // it is unclear how current encoder implementations behave when given a
+  // zero target bitrate.
+ // TODO(perkj): Make sure all known encoder implementations handle zero
+ // target bitrate and remove this check.
+ if (rate_settings.rate_control.bitrate.get_sum_bps() == 0)
+ return;
+
+ if (rate_control_changed) {
+ encoder_->SetRates(rate_settings.rate_control);
+
+ encoder_stats_observer_->OnBitrateAllocationUpdated(
+ send_codec_, rate_settings.rate_control.bitrate);
+ frame_encode_metadata_writer_.OnSetRates(
+ rate_settings.rate_control.bitrate,
+ static_cast<uint32_t>(rate_settings.rate_control.framerate_fps + 0.5));
+ stream_resource_manager_.SetEncoderRates(rate_settings.rate_control);
+ if (layer_allocation_changed &&
+ allocation_cb_type_ ==
+ BitrateAllocationCallbackType::kVideoLayersAllocation) {
+ sink_->OnVideoLayersAllocationUpdated(CreateVideoLayersAllocation(
+ send_codec_, rate_settings.rate_control, encoder_->GetEncoderInfo()));
+ }
+ }
+ if ((allocation_cb_type_ ==
+ BitrateAllocationCallbackType::kVideoBitrateAllocation) ||
+ (encoder_config_.content_type ==
+ VideoEncoderConfig::ContentType::kScreen &&
+ allocation_cb_type_ == BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing)) {
+ sink_->OnBitrateAllocationUpdated(
+        // Update the allocation according to info from the encoder. An
+        // encoder may choose not to use all layers, e.g. due to hardware
+        // constraints.
+ UpdateAllocationFromEncoderInfo(
+ rate_settings.rate_control.target_bitrate,
+ encoder_->GetEncoderInfo()));
+ }
+}
+
+void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
+ int64_t time_when_posted_us) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ input_state_provider_.OnFrameSizeObserved(video_frame.size());
+
+ if (!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
+ video_frame.height() != last_frame_info_->height ||
+ video_frame.is_texture() != last_frame_info_->is_texture) {
+ if ((!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
+ video_frame.height() != last_frame_info_->height) &&
+ settings_.encoder_switch_request_callback && encoder_selector_) {
+ if (auto encoder = encoder_selector_->OnResolutionChange(
+ {video_frame.width(), video_frame.height()})) {
+ settings_.encoder_switch_request_callback->RequestEncoderSwitch(
+ *encoder, /*allow_default_fallback=*/false);
+ }
+ }
+
+ pending_encoder_reconfiguration_ = true;
+ last_frame_info_ = VideoFrameInfo(video_frame.width(), video_frame.height(),
+ video_frame.is_texture());
+ RTC_LOG(LS_INFO) << "Video frame parameters changed: dimensions="
+ << last_frame_info_->width << "x"
+ << last_frame_info_->height
+ << ", texture=" << last_frame_info_->is_texture << ".";
+ // Force full frame update, since resolution has changed.
+ accumulated_update_rect_ =
+ VideoFrame::UpdateRect{0, 0, video_frame.width(), video_frame.height()};
+ }
+
+ // We have to create the encoder before the frame drop logic,
+ // because the latter depends on encoder_->GetScalingSettings.
+ // According to the testcase
+ // InitialFrameDropOffWhenEncoderDisabledScaling, the return value
+ // from GetScalingSettings should enable or disable the frame drop.
+
+  // Update the input frame rate before we start using it. If we update it
+  // after any potential frame drop, we are going to artificially increase
+  // frame sizes. Poll the rate before updating; otherwise we risk the rate
+  // being estimated a little too high at the start of the call when the
+  // window is small.
+ uint32_t framerate_fps = GetInputFramerateFps();
+ frame_cadence_adapter_->UpdateFrameRate();
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (pending_encoder_reconfiguration_) {
+ ReconfigureEncoder();
+ last_parameters_update_ms_.emplace(now_ms);
+ } else if (!last_parameters_update_ms_ ||
+ now_ms - *last_parameters_update_ms_ >=
+ kParameterUpdateIntervalMs) {
+ if (last_encoder_rate_settings_) {
+      // Clone the rate settings before the update, so that SetEncoderRates()
+      // will actually detect the change between the input and
+      // `last_encoder_rate_settings_`, triggering the call to SetRates() on
+      // the encoder.
+ EncoderRateSettings new_rate_settings = *last_encoder_rate_settings_;
+ new_rate_settings.rate_control.framerate_fps =
+ static_cast<double>(framerate_fps);
+ SetEncoderRates(UpdateBitrateAllocation(new_rate_settings));
+ }
+ last_parameters_update_ms_.emplace(now_ms);
+ }
+
+  // Because the pending frame will be dropped in any case, we need to
+  // remember its update region.
+ if (pending_frame_) {
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kEncoderQueue);
+ accumulated_update_rect_.Union(pending_frame_->update_rect());
+ accumulated_update_rect_is_valid_ &= pending_frame_->has_update_rect();
+ }
+
+ if (DropDueToSize(video_frame.size())) {
+ RTC_LOG(LS_INFO) << "Dropping frame. Too large for target bitrate.";
+ stream_resource_manager_.OnFrameDroppedDueToSize();
+ // Storing references to a native buffer risks blocking frame capture.
+ if (video_frame.video_frame_buffer()->type() !=
+ VideoFrameBuffer::Type::kNative) {
+ pending_frame_ = video_frame;
+ pending_frame_post_time_us_ = time_when_posted_us;
+ } else {
+ // Ensure that any previously stored frame is dropped.
+ pending_frame_.reset();
+ accumulated_update_rect_.Union(video_frame.update_rect());
+ accumulated_update_rect_is_valid_ &= video_frame.has_update_rect();
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kEncoderQueue);
+ }
+ return;
+ }
+ stream_resource_manager_.OnMaybeEncodeFrame();
+
+ if (EncoderPaused()) {
+ // Storing references to a native buffer risks blocking frame capture.
+ if (video_frame.video_frame_buffer()->type() !=
+ VideoFrameBuffer::Type::kNative) {
+ if (pending_frame_)
+ TraceFrameDropStart();
+ pending_frame_ = video_frame;
+ pending_frame_post_time_us_ = time_when_posted_us;
+ } else {
+ // Ensure that any previously stored frame is dropped.
+ pending_frame_.reset();
+ TraceFrameDropStart();
+ accumulated_update_rect_.Union(video_frame.update_rect());
+ accumulated_update_rect_is_valid_ &= video_frame.has_update_rect();
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kEncoderQueue);
+ }
+ return;
+ }
+
+ pending_frame_.reset();
+
+ frame_dropper_.Leak(framerate_fps);
+  // Frame dropping is enabled iff frame dropping is not force-disabled, and
+  // the rate controller is not trusted.
+ const bool frame_dropping_enabled =
+ !force_disable_frame_dropper_ &&
+ !encoder_info_.has_trusted_rate_controller;
+ frame_dropper_.Enable(frame_dropping_enabled);
+ if (frame_dropping_enabled && frame_dropper_.DropFrame()) {
+ RTC_LOG(LS_VERBOSE)
+ << "Drop Frame: "
+ "target bitrate "
+ << (last_encoder_rate_settings_
+ ? last_encoder_rate_settings_->encoder_target.bps()
+ : 0)
+ << ", input frame rate " << framerate_fps;
+ OnDroppedFrame(
+ EncodedImageCallback::DropReason::kDroppedByMediaOptimizations);
+ accumulated_update_rect_.Union(video_frame.update_rect());
+ accumulated_update_rect_is_valid_ &= video_frame.has_update_rect();
+ return;
+ }
+
+ EncodeVideoFrame(video_frame, time_when_posted_us);
+}
+
+void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
+ int64_t time_when_posted_us) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_LOG(LS_VERBOSE) << __func__ << " posted " << time_when_posted_us
+ << " ntp time " << video_frame.ntp_time_ms();
+
+  // If the encoder failed we can't continue to encode frames. When this
+  // happens the WebrtcVideoSender is notified and the whole VideoSendStream
+  // is recreated.
+ if (encoder_failed_ || !encoder_initialized_)
+ return;
+
+ // It's possible that EncodeVideoFrame can be called after we've completed
+ // a Stop() operation. Check if the encoder_ is set before continuing.
+ // See: bugs.webrtc.org/12857
+ if (!encoder_)
+ return;
+
+ TraceFrameDropEnd();
+
+ // Encoder metadata needs to be updated before encode complete callback.
+ VideoEncoder::EncoderInfo info = encoder_->GetEncoderInfo();
+ if (info.implementation_name != encoder_info_.implementation_name ||
+ info.is_hardware_accelerated != encoder_info_.is_hardware_accelerated) {
+ encoder_stats_observer_->OnEncoderImplementationChanged({
+ .name = info.implementation_name,
+ .is_hardware_accelerated = info.is_hardware_accelerated,
+ });
+ if (bitrate_adjuster_) {
+ // Encoder implementation changed, reset overshoot detector states.
+ bitrate_adjuster_->Reset();
+ }
+ }
+
+ if (encoder_info_ != info) {
+ OnEncoderSettingsChanged();
+ stream_resource_manager_.ConfigureEncodeUsageResource();
+    // Reconfigure the scalers when the encoder info has changed. Consider two
+    // cases:
+    // 1. When the status of the scaler changes from enabled to disabled,
+    // without this reconfiguration the scaler would adapt up/down and trigger
+    // an unnecessary full ReconfigureEncoder() when the scaler should be
+    // banned.
+    // 2. When the status of the scaler changes from disabled to enabled,
+    // without this reconfiguration the scaler would not work until something
+    // else triggers a ReconfigureEncoder(). In extreme cases the scaler would
+    // not work for a long time when we expect it to.
+ stream_resource_manager_.ConfigureQualityScaler(info);
+ stream_resource_manager_.ConfigureBandwidthQualityScaler(info);
+
+ RTC_LOG(LS_INFO) << "Encoder info changed to " << info.ToString();
+ }
+
+ if (bitrate_adjuster_) {
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ if (info.fps_allocation[si] != encoder_info_.fps_allocation[si]) {
+ bitrate_adjuster_->OnEncoderInfo(info);
+ break;
+ }
+ }
+ }
+ encoder_info_ = info;
+ last_encode_info_ms_ = clock_->TimeInMilliseconds();
+
+ VideoFrame out_frame(video_frame);
+  // Crop or scale the frame if needed. Dimensions may be reduced to fit
+  // encoder requirements, e.g. some encoders require them to be divisible
+  // by 4.
+ if ((crop_width_ > 0 || crop_height_ > 0) &&
+ (out_frame.video_frame_buffer()->type() !=
+ VideoFrameBuffer::Type::kNative ||
+ !info.supports_native_handle)) {
+ int cropped_width = video_frame.width() - crop_width_;
+ int cropped_height = video_frame.height() - crop_height_;
+ rtc::scoped_refptr<VideoFrameBuffer> cropped_buffer;
+ // TODO(ilnik): Remove scaling if cropping is too big, as it should never
+ // happen after SinkWants signaled correctly from ReconfigureEncoder.
+ VideoFrame::UpdateRect update_rect = video_frame.update_rect();
+ if (crop_width_ < 4 && crop_height_ < 4) {
+ // The difference is small, crop without scaling.
+ cropped_buffer = video_frame.video_frame_buffer()->CropAndScale(
+ crop_width_ / 2, crop_height_ / 2, cropped_width, cropped_height,
+ cropped_width, cropped_height);
+ update_rect.offset_x -= crop_width_ / 2;
+ update_rect.offset_y -= crop_height_ / 2;
+ update_rect.Intersect(
+ VideoFrame::UpdateRect{0, 0, cropped_width, cropped_height});
+
+ } else {
+ // The difference is large, scale it.
+ cropped_buffer = video_frame.video_frame_buffer()->Scale(cropped_width,
+ cropped_height);
+ if (!update_rect.IsEmpty()) {
+        // Since we can't reason about pixels after scaling, we invalidate the
+        // whole picture if anything changed.
+ update_rect =
+ VideoFrame::UpdateRect{0, 0, cropped_width, cropped_height};
+ }
+ }
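+    // E.g. a 1282x722 frame encoded as 1280x720 (crop_width_ == crop_height_
+    // == 2) is center-cropped, while a 1920x1080 frame encoded as 1280x720
+    // (crop_width_ == 640) takes the scaling path above.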
+ if (!cropped_buffer) {
+ RTC_LOG(LS_ERROR) << "Cropping and scaling frame failed, dropping frame.";
+ return;
+ }
+
+ out_frame.set_video_frame_buffer(cropped_buffer);
+ out_frame.set_update_rect(update_rect);
+ out_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
+    // Since accumulated_update_rect_ is constructed before cropping,
+    // we can't trust it. If any changes were pending, we invalidate the whole
+    // frame here.
+ if (!accumulated_update_rect_.IsEmpty()) {
+ accumulated_update_rect_ =
+ VideoFrame::UpdateRect{0, 0, out_frame.width(), out_frame.height()};
+ accumulated_update_rect_is_valid_ = false;
+ }
+ }
+
+ if (!accumulated_update_rect_is_valid_) {
+ out_frame.clear_update_rect();
+ } else if (!accumulated_update_rect_.IsEmpty() &&
+ out_frame.has_update_rect()) {
+ accumulated_update_rect_.Union(out_frame.update_rect());
+ accumulated_update_rect_.Intersect(
+ VideoFrame::UpdateRect{0, 0, out_frame.width(), out_frame.height()});
+ out_frame.set_update_rect(accumulated_update_rect_);
+ accumulated_update_rect_.MakeEmptyUpdate();
+ }
+ accumulated_update_rect_is_valid_ = true;
+
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame.render_time_ms(),
+ "Encode");
+
+ stream_resource_manager_.OnEncodeStarted(out_frame, time_when_posted_us);
+
+ // The encoder should get the size that it expects.
+ RTC_DCHECK(send_codec_.width <= out_frame.width() &&
+ send_codec_.height <= out_frame.height())
+ << "Encoder configured to " << send_codec_.width << "x"
+ << send_codec_.height << " received a too small frame "
+ << out_frame.width() << "x" << out_frame.height();
+
+ TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
+ out_frame.timestamp());
+
+ frame_encode_metadata_writer_.OnEncodeStarted(out_frame);
+
+ const int32_t encode_status = encoder_->Encode(out_frame, &next_frame_types_);
+ was_encode_called_since_last_initialization_ = true;
+
+ if (encode_status < 0) {
+ RTC_LOG(LS_ERROR) << "Encoder failed, failing encoder format: "
+ << encoder_config_.video_format.ToString();
+ RequestEncoderSwitch();
+ return;
+ }
+
+ for (auto& it : next_frame_types_) {
+ it = VideoFrameType::kVideoFrameDelta;
+ }
+}
+
+void VideoStreamEncoder::RequestRefreshFrame() {
+ worker_queue_->PostTask(SafeTask(task_safety_.flag(), [this] {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ video_source_sink_controller_.RequestRefreshFrame();
+ }));
+}
+
+void VideoStreamEncoder::SendKeyFrame(
+ const std::vector<VideoFrameType>& layers) {
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask([this, layers] { SendKeyFrame(layers); });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ TRACE_EVENT0("webrtc", "OnKeyFrameRequest");
+ RTC_DCHECK(!next_frame_types_.empty());
+
+ if (frame_cadence_adapter_)
+ frame_cadence_adapter_->ProcessKeyFrameRequest();
+
+ if (!encoder_) {
+ RTC_DLOG(LS_INFO) << __func__ << " no encoder.";
+ return; // Shutting down, or not configured yet.
+ }
+
+ if (!layers.empty()) {
+ RTC_DCHECK_EQ(layers.size(), next_frame_types_.size());
+ for (size_t i = 0; i < layers.size() && i < next_frame_types_.size(); i++) {
+ next_frame_types_[i] = layers[i];
+ }
+ } else {
+ std::fill(next_frame_types_.begin(), next_frame_types_.end(),
+ VideoFrameType::kVideoFrameKey);
+ }
+}
+
+void VideoStreamEncoder::OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) {
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask(
+ [this, loss_notification] { OnLossNotification(loss_notification); });
+ return;
+ }
+
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (encoder_) {
+ encoder_->OnLossNotification(loss_notification);
+ }
+}
+
+EncodedImage VideoStreamEncoder::AugmentEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ EncodedImage image_copy(encoded_image);
+ const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
+ frame_encode_metadata_writer_.FillTimingInfo(spatial_idx, &image_copy);
+ frame_encode_metadata_writer_.UpdateBitstream(codec_specific_info,
+ &image_copy);
+ VideoCodecType codec_type = codec_specific_info
+ ? codec_specific_info->codecType
+ : VideoCodecType::kVideoCodecGeneric;
+ if (image_copy.qp_ < 0 && qp_parsing_allowed_) {
+ // Parse encoded frame QP if that was not provided by encoder.
+ image_copy.qp_ = qp_parser_
+ .Parse(codec_type, spatial_idx, image_copy.data(),
+ image_copy.size())
+ .value_or(-1);
+ }
+ RTC_LOG(LS_VERBOSE) << __func__ << " spatial_idx " << spatial_idx << " qp "
+ << image_copy.qp_;
+ image_copy.SetAtTargetQuality(codec_type == kVideoCodecVP8 &&
+ image_copy.qp_ <= kVp8SteadyStateQpThreshold);
+
+ // Piggyback ALR experiment group id and simulcast id into the content type.
+ const uint8_t experiment_id =
+ experiment_groups_[videocontenttypehelpers::IsScreenshare(
+ image_copy.content_type_)];
+
+ // TODO(ilnik): This will force content type extension to be present even
+  // for realtime video. At the expense of minuscule overhead we will get
+ // sliced receive statistics.
+ RTC_CHECK(videocontenttypehelpers::SetExperimentId(&image_copy.content_type_,
+ experiment_id));
+  // We count simulcast streams from 1 on the wire. That's why we set the
+  // simulcast id in the content type to the actual simulcast index plus 1
+  // (e.g. stream 0 is signaled as 1), because the value 0 on the wire is
+  // reserved for 'no simulcast stream specified'.
+ RTC_CHECK(videocontenttypehelpers::SetSimulcastId(
+ &image_copy.content_type_, static_cast<uint8_t>(spatial_idx + 1)));
+
+ return image_copy;
+}
+
+EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
+ "timestamp", encoded_image.Timestamp());
+
+ // TODO(bugs.webrtc.org/10520): Signal the simulcast id explicitly.
+
+ const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
+ const VideoCodecType codec_type = codec_specific_info
+ ? codec_specific_info->codecType
+ : VideoCodecType::kVideoCodecGeneric;
+ EncodedImage image_copy =
+ AugmentEncodedImage(encoded_image, codec_specific_info);
+
+  // Post a task because `send_codec_` requires the `encoder_queue_` lock and
+  // we need to update on quality convergence.
+ unsigned int image_width = image_copy._encodedWidth;
+ unsigned int image_height = image_copy._encodedHeight;
+ encoder_queue_.PostTask([this, codec_type, image_width, image_height,
+ spatial_idx,
+ at_target_quality = image_copy.IsAtTargetQuality()] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ // Let the frame cadence adapter know about quality convergence.
+ if (frame_cadence_adapter_)
+ frame_cadence_adapter_->UpdateLayerQualityConvergence(spatial_idx,
+ at_target_quality);
+
+ // Currently, the internal quality scaler is used for VP9 instead of the
+ // webrtc qp scaler (in the no-svc case or if only a single spatial layer is
+ // encoded). It has to be explicitly detected and reported to adaptation
+ // metrics.
+ if (codec_type == VideoCodecType::kVideoCodecVP9 &&
+ send_codec_.VP9()->automaticResizeOn) {
+ unsigned int expected_width = send_codec_.width;
+ unsigned int expected_height = send_codec_.height;
+ int num_active_layers = 0;
+ for (int i = 0; i < send_codec_.VP9()->numberOfSpatialLayers; ++i) {
+ if (send_codec_.spatialLayers[i].active) {
+ ++num_active_layers;
+ expected_width = send_codec_.spatialLayers[i].width;
+ expected_height = send_codec_.spatialLayers[i].height;
+ }
+ }
+ RTC_DCHECK_LE(num_active_layers, 1)
+ << "VP9 quality scaling is enabled for "
+ "SVC with several active layers.";
+ encoder_stats_observer_->OnEncoderInternalScalerUpdate(
+ image_width < expected_width || image_height < expected_height);
+ }
+ });
+
+  // Encoded is called on whatever thread the real encoder implementation runs
+ // on. In the case of hardware encoders, there might be several encoders
+ // running in parallel on different threads.
+ encoder_stats_observer_->OnSendEncodedImage(image_copy, codec_specific_info);
+
+ EncodedImageCallback::Result result =
+ sink_->OnEncodedImage(image_copy, codec_specific_info);
+
+  // We are only interested in propagating the metadata about the image, not
+  // the encoded data itself, to the post-encode function. Since we cannot be
+  // sure the pointer will still be valid when run on the task queue, set it
+  // to null.
+ DataSize frame_size = DataSize::Bytes(image_copy.size());
+ image_copy.ClearEncodedData();
+
+ int temporal_index = 0;
+ if (codec_specific_info) {
+ if (codec_specific_info->codecType == kVideoCodecVP9) {
+ temporal_index = codec_specific_info->codecSpecific.VP9.temporal_idx;
+ } else if (codec_specific_info->codecType == kVideoCodecVP8) {
+ temporal_index = codec_specific_info->codecSpecific.VP8.temporalIdx;
+ }
+ }
+ if (temporal_index == kNoTemporalIdx) {
+ temporal_index = 0;
+ }
+
+ RunPostEncode(image_copy, clock_->CurrentTime().us(), temporal_index,
+ frame_size);
+
+ if (result.error == Result::OK) {
+    // In the case of an internal encoder running on a separate thread, the
+    // decision to drop a frame might be a frame late and signaled via an
+    // atomic flag. This is because we can't easily wait for the worker thread
+    // without risking deadlocks, e.g. during shutdown when the worker thread
+    // might be waiting for the internal encoder threads to stop.
+ if (pending_frame_drops_.load() > 0) {
+ int pending_drops = pending_frame_drops_.fetch_sub(1);
+ RTC_DCHECK_GT(pending_drops, 0);
+ result.drop_next_frame = true;
+ }
+ }
+
+ return result;
+}
+
+void VideoStreamEncoder::OnDroppedFrame(DropReason reason) {
+ switch (reason) {
+ case DropReason::kDroppedByMediaOptimizations:
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kMediaOptimization);
+ break;
+ case DropReason::kDroppedByEncoder:
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kEncoder);
+ break;
+ }
+ sink_->OnDroppedFrame(reason);
+ encoder_queue_.PostTask([this, reason] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ stream_resource_manager_.OnFrameDropped(reason);
+ });
+}
+
+DataRate VideoStreamEncoder::UpdateTargetBitrate(DataRate target_bitrate,
+ double cwnd_reduce_ratio) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ DataRate updated_target_bitrate = target_bitrate;
+
+  // Drop frames when the congestion window pushback ratio is larger than 1
+  // percent and the target bitrate is larger than the codec min bitrate.
+  // When target_bitrate is 0 the codec is paused, so skip frame dropping.
+ if (cwnd_reduce_ratio > 0.01 && target_bitrate.bps() > 0 &&
+ target_bitrate.bps() > send_codec_.minBitrate * 1000) {
+ int reduce_bitrate_bps = std::min(
+ static_cast<int>(target_bitrate.bps() * cwnd_reduce_ratio),
+ static_cast<int>(target_bitrate.bps() - send_codec_.minBitrate * 1000));
+ if (reduce_bitrate_bps > 0) {
+      // At maximum, congestion window pushback can drop half of the frames.
+ cwnd_frame_drop_interval_ = std::max(
+ 2, static_cast<int>(target_bitrate.bps() / reduce_bitrate_bps));
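+      // E.g. a 1000 kbps target with cwnd_reduce_ratio = 0.2 gives
+      // reduce_bitrate_bps = 200 kbps, a drop interval of
+      // max(2, 1000 / 200) = 5 (every 5th frame dropped), and a reduced
+      // target of 1000 - 1000 / 5 = 800 kbps below.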
+ // Reduce target bitrate accordingly.
+ updated_target_bitrate =
+ target_bitrate - (target_bitrate / cwnd_frame_drop_interval_.value());
+ return updated_target_bitrate;
+ }
+ }
+ cwnd_frame_drop_interval_.reset();
+ return updated_target_bitrate;
+}
+
+void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate,
+ DataRate stable_target_bitrate,
+ DataRate link_allocation,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms,
+ double cwnd_reduce_ratio) {
+ RTC_DCHECK_GE(link_allocation, target_bitrate);
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask([this, target_bitrate, stable_target_bitrate,
+ link_allocation, fraction_lost, round_trip_time_ms,
+ cwnd_reduce_ratio] {
+ DataRate updated_target_bitrate =
+ UpdateTargetBitrate(target_bitrate, cwnd_reduce_ratio);
+ OnBitrateUpdated(updated_target_bitrate, stable_target_bitrate,
+ link_allocation, fraction_lost, round_trip_time_ms,
+ cwnd_reduce_ratio);
+ });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ const bool video_is_suspended = target_bitrate == DataRate::Zero();
+ const bool video_suspension_changed = video_is_suspended != EncoderPaused();
+
+ if (!video_is_suspended && settings_.encoder_switch_request_callback &&
+ encoder_selector_) {
+ if (auto encoder = encoder_selector_->OnAvailableBitrate(link_allocation)) {
+ settings_.encoder_switch_request_callback->RequestEncoderSwitch(
+ *encoder, /*allow_default_fallback=*/false);
+ }
+ }
+
+ RTC_DCHECK(sink_) << "sink_ must be set before the encoder is active.";
+
+ RTC_LOG(LS_VERBOSE) << "OnBitrateUpdated, bitrate " << target_bitrate.bps()
+ << " stable bitrate = " << stable_target_bitrate.bps()
+ << " link allocation bitrate = " << link_allocation.bps()
+ << " packet loss " << static_cast<int>(fraction_lost)
+ << " rtt " << round_trip_time_ms;
+
+ if (encoder_) {
+ encoder_->OnPacketLossRateUpdate(static_cast<float>(fraction_lost) / 256.f);
+ encoder_->OnRttUpdate(round_trip_time_ms);
+ }
+
+ uint32_t framerate_fps = GetInputFramerateFps();
+ frame_dropper_.SetRates((target_bitrate.bps() + 500) / 1000, framerate_fps);
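+  // (Adding 500 before dividing rounds bps to the nearest kbps.)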
+
+ EncoderRateSettings new_rate_settings{
+ VideoBitrateAllocation(), static_cast<double>(framerate_fps),
+ link_allocation, target_bitrate, stable_target_bitrate};
+ SetEncoderRates(UpdateBitrateAllocation(new_rate_settings));
+
+ if (target_bitrate.bps() != 0)
+ encoder_target_bitrate_bps_ = target_bitrate.bps();
+
+ stream_resource_manager_.SetTargetBitrate(target_bitrate);
+
+ if (video_suspension_changed) {
+ RTC_LOG(LS_INFO) << "Video suspend state changed to: "
+ << (video_is_suspended ? "suspended" : "not suspended");
+ encoder_stats_observer_->OnSuspendChange(video_is_suspended);
+
+ if (!video_is_suspended && pending_frame_ &&
+ !DropDueToSize(pending_frame_->size())) {
+ // A pending stored frame can be processed.
+ int64_t pending_time_us =
+ clock_->CurrentTime().us() - pending_frame_post_time_us_;
+ if (pending_time_us < kPendingFrameTimeoutMs * 1000)
+ EncodeVideoFrame(*pending_frame_, pending_frame_post_time_us_);
+ pending_frame_.reset();
+ } else if (!video_is_suspended && !pending_frame_ &&
+ encoder_paused_and_dropped_frame_) {
+ // A frame was enqueued during pause-state, but since it was a native
+ // frame we could not store it in `pending_frame_` so request a
+ // refresh-frame instead.
+ RequestRefreshFrame();
+ }
+ }
+}
+
+bool VideoStreamEncoder::DropDueToSize(uint32_t pixel_count) const {
+ if (!encoder_ || !stream_resource_manager_.DropInitialFrames() ||
+ !encoder_target_bitrate_bps_.has_value()) {
+ return false;
+ }
+
+ bool simulcast_or_svc =
+ (send_codec_.codecType == VideoCodecType::kVideoCodecVP9 &&
+ send_codec_.VP9().numberOfSpatialLayers > 1) ||
+ (send_codec_.numberOfSimulcastStreams > 1 ||
+ encoder_config_.simulcast_layers.size() > 1);
+
+ if (simulcast_or_svc) {
+ if (stream_resource_manager_.SingleActiveStreamPixels()) {
+ pixel_count = stream_resource_manager_.SingleActiveStreamPixels().value();
+ } else {
+ return false;
+ }
+ }
+
+ uint32_t bitrate_bps =
+ stream_resource_manager_.UseBandwidthAllocationBps().value_or(
+ encoder_target_bitrate_bps_.value());
+
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> encoder_bitrate_limits =
+ GetEncoderInfoWithBitrateLimitUpdate(
+ encoder_->GetEncoderInfo(), encoder_config_, default_limits_allowed_)
+ .GetEncoderBitrateLimitsForResolution(pixel_count);
+
+ if (encoder_bitrate_limits.has_value()) {
+ // Use bitrate limits provided by encoder.
+ return bitrate_bps <
+ static_cast<uint32_t>(encoder_bitrate_limits->min_start_bitrate_bps);
+ }
+
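+  // Fallback thresholds when the encoder provides no limits: e.g. at
+  // 400 kbps a 1280x720 frame (921600 pixels > 640 * 480) is dropped, while
+  // a 640x480 frame is kept.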
+ if (bitrate_bps < 300000 /* qvga */) {
+ return pixel_count > 320 * 240;
+ } else if (bitrate_bps < 500000 /* vga */) {
+ return pixel_count > 640 * 480;
+ }
+ return false;
+}
+
+void VideoStreamEncoder::OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_LOG(LS_INFO) << "Updating sink restrictions from "
+ << (reason ? reason->Name() : std::string("<null>"))
+ << " to " << restrictions.ToString();
+
+ // TODO(webrtc:14451) Split video_source_sink_controller_
+ // so that ownership on restrictions/wants is kept on &encoder_queue_
+ latest_restrictions_ = restrictions;
+
+ worker_queue_->PostTask(SafeTask(
+ task_safety_.flag(), [this, restrictions = std::move(restrictions)]() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ video_source_sink_controller_.SetRestrictions(std::move(restrictions));
+ video_source_sink_controller_.PushSourceSinkSettings();
+ }));
+}
+
+void VideoStreamEncoder::RunPostEncode(const EncodedImage& encoded_image,
+ int64_t time_sent_us,
+ int temporal_index,
+ DataSize frame_size) {
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask([this, encoded_image, time_sent_us, temporal_index,
+ frame_size] {
+ RunPostEncode(encoded_image, time_sent_us, temporal_index, frame_size);
+ });
+ return;
+ }
+
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ absl::optional<int> encode_duration_us;
+ if (encoded_image.timing_.flags != VideoSendTiming::kInvalid) {
+ encode_duration_us =
+ TimeDelta::Millis(encoded_image.timing_.encode_finish_ms -
+ encoded_image.timing_.encode_start_ms)
+ .us();
+ }
+
+ // Run post encode tasks, such as overuse detection and frame rate/drop
+ // stats for internal encoders.
+ const bool keyframe =
+ encoded_image._frameType == VideoFrameType::kVideoFrameKey;
+
+ if (!frame_size.IsZero()) {
+ frame_dropper_.Fill(frame_size.bytes(), !keyframe);
+ }
+
+ stream_resource_manager_.OnEncodeCompleted(encoded_image, time_sent_us,
+ encode_duration_us, frame_size);
+ if (bitrate_adjuster_) {
+ bitrate_adjuster_->OnEncodedFrame(
+ frame_size, encoded_image.SpatialIndex().value_or(0), temporal_index);
+ }
+}
+
+void VideoStreamEncoder::ReleaseEncoder() {
+ if (!encoder_ || !encoder_initialized_) {
+ return;
+ }
+ encoder_->Release();
+ encoder_initialized_ = false;
+ TRACE_EVENT0("webrtc", "VCMGenericEncoder::Release");
+}
+
+VideoStreamEncoder::AutomaticAnimationDetectionExperiment
+VideoStreamEncoder::ParseAutomatincAnimationDetectionFieldTrial() const {
+ AutomaticAnimationDetectionExperiment result;
+
+ result.Parser()->Parse(
+ field_trials_.Lookup("WebRTC-AutomaticAnimationDetectionScreenshare"));
+
+ if (!result.enabled) {
+ RTC_LOG(LS_INFO) << "Automatic animation detection experiment is disabled.";
+ return result;
+ }
+
+ RTC_LOG(LS_INFO) << "Automatic animation detection experiment settings:"
+ " min_duration_ms="
+ << result.min_duration_ms
+                   << " min_area_ratio=" << result.min_area_ratio
+ << " min_fps=" << result.min_fps;
+
+ return result;
+}
+
+void VideoStreamEncoder::CheckForAnimatedContent(
+ const VideoFrame& frame,
+ int64_t time_when_posted_in_us) {
+ if (!automatic_animation_detection_experiment_.enabled ||
+ encoder_config_.content_type !=
+ VideoEncoderConfig::ContentType::kScreen ||
+ stream_resource_manager_.degradation_preference() !=
+ DegradationPreference::BALANCED) {
+ return;
+ }
+
+ if (expect_resize_state_ == ExpectResizeState::kResize && last_frame_info_ &&
+ last_frame_info_->width != frame.width() &&
+ last_frame_info_->height != frame.height()) {
+    // When the resolution cap is applied, there will be one frame with a
+    // missing or different update_rect, which should be skipped. That frame
+    // can be delayed by several frames.
+ expect_resize_state_ = ExpectResizeState::kFirstFrameAfterResize;
+ return;
+ }
+
+ if (expect_resize_state_ == ExpectResizeState::kFirstFrameAfterResize) {
+ // The first frame after resize should have new, scaled update_rect.
+ if (frame.has_update_rect()) {
+ last_update_rect_ = frame.update_rect();
+ } else {
+ last_update_rect_ = absl::nullopt;
+ }
+ expect_resize_state_ = ExpectResizeState::kNoResize;
+ }
+
+ bool should_cap_resolution = false;
+ if (!frame.has_update_rect()) {
+ last_update_rect_ = absl::nullopt;
+ animation_start_time_ = Timestamp::PlusInfinity();
+ } else if ((!last_update_rect_ ||
+ frame.update_rect() != *last_update_rect_)) {
+ last_update_rect_ = frame.update_rect();
+ animation_start_time_ = Timestamp::Micros(time_when_posted_in_us);
+ } else {
+ TimeDelta animation_duration =
+ Timestamp::Micros(time_when_posted_in_us) - animation_start_time_;
+ float area_ratio = static_cast<float>(last_update_rect_->width *
+ last_update_rect_->height) /
+ (frame.width() * frame.height());
+ if (animation_duration.ms() >=
+ automatic_animation_detection_experiment_.min_duration_ms &&
+ area_ratio >=
+ automatic_animation_detection_experiment_.min_area_ratio &&
+ encoder_stats_observer_->GetInputFrameRate() >=
+ automatic_animation_detection_experiment_.min_fps) {
+ should_cap_resolution = true;
+ }
+ }
+ if (cap_resolution_due_to_video_content_ != should_cap_resolution) {
+ expect_resize_state_ = should_cap_resolution ? ExpectResizeState::kResize
+ : ExpectResizeState::kNoResize;
+ cap_resolution_due_to_video_content_ = should_cap_resolution;
+ if (should_cap_resolution) {
+ RTC_LOG(LS_INFO) << "Applying resolution cap due to animation detection.";
+ } else {
+ RTC_LOG(LS_INFO) << "Removing resolution cap due to no consistent "
+ "animation detection.";
+ }
+    // TODO(webrtc:14451) Split video_source_sink_controller_
+    // so that ownership of restrictions/wants is kept on &encoder_queue_.
+ if (should_cap_resolution) {
+ animate_restrictions_ =
+ VideoSourceRestrictions(kMaxAnimationPixels,
+ /* target_pixels_per_frame= */ absl::nullopt,
+ /* max_frame_rate= */ absl::nullopt);
+ } else {
+ animate_restrictions_.reset();
+ }
+
+ worker_queue_->PostTask(
+ SafeTask(task_safety_.flag(), [this, should_cap_resolution]() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ video_source_sink_controller_.SetPixelsPerFrameUpperLimit(
+ should_cap_resolution
+ ? absl::optional<size_t>(kMaxAnimationPixels)
+ : absl::nullopt);
+ video_source_sink_controller_.PushSourceSinkSettings();
+ }));
+ }
+}
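+// Worked example of the detection math above (illustrative): for a 1280x720
+// screenshare frame whose update_rect stays at 1200x680 from frame to frame,
+//   area_ratio = (1200 * 680) / (1280 * 720) ~= 0.885 >= min_area_ratio
+// (default 0.8), so once the rect has been unchanged for min_duration_ms
+// (default 2000 ms) and the input frame rate is at least min_fps (default
+// 10), should_cap_resolution becomes true and the sink is limited to
+// kMaxAnimationPixels.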
+
+void VideoStreamEncoder::InjectAdaptationResource(
+ rtc::scoped_refptr<Resource> resource,
+ VideoAdaptationReason reason) {
+ encoder_queue_.PostTask([this, resource = std::move(resource), reason] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ additional_resources_.push_back(resource);
+ stream_resource_manager_.AddResource(resource, reason);
+ });
+}
+
+void VideoStreamEncoder::InjectAdaptationConstraint(
+ AdaptationConstraint* adaptation_constraint) {
+ rtc::Event event;
+ encoder_queue_.PostTask([this, adaptation_constraint, &event] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (!resource_adaptation_processor_) {
+ // The VideoStreamEncoder was stopped and the processor destroyed before
+ // this task had a chance to execute. No action needed.
+ return;
+ }
+ adaptation_constraints_.push_back(adaptation_constraint);
+ video_stream_adapter_->AddAdaptationConstraint(adaptation_constraint);
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+}
+
+void VideoStreamEncoder::AddRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener) {
+ rtc::Event event;
+ encoder_queue_.PostTask([this, restrictions_listener, &event] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(resource_adaptation_processor_);
+ video_stream_adapter_->AddRestrictionsListener(restrictions_listener);
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+}
+
+void VideoStreamEncoder::RemoveRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener) {
+ rtc::Event event;
+ encoder_queue_.PostTask([this, restrictions_listener, &event] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(resource_adaptation_processor_);
+ video_stream_adapter_->RemoveRestrictionsListener(restrictions_listener);
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/video/video_stream_encoder.h b/third_party/libwebrtc/video/video_stream_encoder.h
new file mode 100644
index 0000000000..ccff3ffefd
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder.h
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_ENCODER_H_
+#define VIDEO_VIDEO_STREAM_ENCODER_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "api/adaptation/resource.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_sender_interface.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/units/data_rate.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_stream_encoder_settings.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/resource_adaptation_processor.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "modules/video_coding/utility/frame_dropper.h"
+#include "modules/video_coding/utility/qp_parser.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/adaptation/video_stream_encoder_resource_manager.h"
+#include "video/encoder_bitrate_adjuster.h"
+#include "video/frame_cadence_adapter.h"
+#include "video/frame_encode_metadata_writer.h"
+#include "video/video_source_sink_controller.h"
+#include "video/video_stream_encoder_interface.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+// VideoStreamEncoder represents a video encoder that accepts raw video frames
+// as input and produces an encoded bit stream.
+// Usage:
+// Instantiate.
+// Call SetSink.
+// Call SetSource.
+// Call ConfigureEncoder with the codec settings.
+// Call Stop() when done.
+class VideoStreamEncoder : public VideoStreamEncoderInterface,
+ private EncodedImageCallback,
+ public VideoSourceRestrictionsListener {
+ public:
+ // TODO(bugs.webrtc.org/12000): Reporting of VideoBitrateAllocation is being
+ // deprecated. Instead VideoLayersAllocation should be reported.
+ enum class BitrateAllocationCallbackType {
+ kVideoBitrateAllocation,
+ kVideoBitrateAllocationWhenScreenSharing,
+ kVideoLayersAllocation
+ };
+ VideoStreamEncoder(
+ Clock* clock,
+ uint32_t number_of_cores,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ const VideoStreamEncoderSettings& settings,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector,
+ std::unique_ptr<FrameCadenceAdapterInterface> frame_cadence_adapter,
+ std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter>
+ encoder_queue,
+ BitrateAllocationCallbackType allocation_cb_type,
+ const FieldTrialsView& field_trials,
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector =
+ nullptr);
+ ~VideoStreamEncoder() override;
+
+ VideoStreamEncoder(const VideoStreamEncoder&) = delete;
+ VideoStreamEncoder& operator=(const VideoStreamEncoder&) = delete;
+
+ void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
+ std::vector<rtc::scoped_refptr<Resource>> GetAdaptationResources() override;
+
+ void SetSource(rtc::VideoSourceInterface<VideoFrame>* source,
+ const DegradationPreference& degradation_preference) override;
+
+ void SetSink(EncoderSink* sink, bool rotation_applied) override;
+
+  // TODO(perkj): Can we remove VideoCodec.startBitrate?
+ void SetStartBitrate(int start_bitrate_bps) override;
+
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+
+ void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length) override;
+ void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ SetParametersCallback callback) override;
+
+ // Permanently stop encoding. After this method has returned, it is
+ // guaranteed that no encoded frames will be delivered to the sink.
+ void Stop() override;
+
+ void SendKeyFrame(const std::vector<VideoFrameType>& layers = {}) override;
+
+ void OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) override;
+
+ void OnBitrateUpdated(DataRate target_bitrate,
+ DataRate stable_target_bitrate,
+ DataRate target_headroom,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms,
+ double cwnd_reduce_ratio) override;
+
+ DataRate UpdateTargetBitrate(DataRate target_bitrate,
+ double cwnd_reduce_ratio);
+
+ protected:
+ // Used for testing. For example the `ScalingObserverInterface` methods must
+ // be called on `encoder_queue_`.
+ TaskQueueBase* encoder_queue() { return encoder_queue_.Get(); }
+
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) override;
+
+ // Used for injected test resources.
+ // TODO(eshr): Move all adaptation tests out of VideoStreamEncoder tests.
+ void InjectAdaptationResource(rtc::scoped_refptr<Resource> resource,
+ VideoAdaptationReason reason);
+ void InjectAdaptationConstraint(AdaptationConstraint* adaptation_constraint);
+
+ void AddRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener);
+ void RemoveRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener);
+
+ private:
+ class CadenceCallback : public FrameCadenceAdapterInterface::Callback {
+ public:
+ explicit CadenceCallback(VideoStreamEncoder& video_stream_encoder)
+ : video_stream_encoder_(video_stream_encoder) {}
+ // FrameCadenceAdapterInterface::Callback overrides.
+ void OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& frame) override {
+ video_stream_encoder_.OnFrame(post_time, frames_scheduled_for_processing,
+ frame);
+ }
+ void OnDiscardedFrame() override {
+ video_stream_encoder_.OnDiscardedFrame();
+ }
+ void RequestRefreshFrame() override {
+ video_stream_encoder_.RequestRefreshFrame();
+ }
+
+ private:
+ VideoStreamEncoder& video_stream_encoder_;
+ };
+
+ class VideoFrameInfo {
+ public:
+ VideoFrameInfo(int width, int height, bool is_texture)
+ : width(width), height(height), is_texture(is_texture) {}
+ int width;
+ int height;
+ bool is_texture;
+ int pixel_count() const { return width * height; }
+ };
+
+ struct EncoderRateSettings {
+ EncoderRateSettings();
+ EncoderRateSettings(const VideoBitrateAllocation& bitrate,
+ double framerate_fps,
+ DataRate bandwidth_allocation,
+ DataRate encoder_target,
+ DataRate stable_encoder_target);
+ bool operator==(const EncoderRateSettings& rhs) const;
+ bool operator!=(const EncoderRateSettings& rhs) const;
+
+ VideoEncoder::RateControlParameters rate_control;
+    // This is the scalar target bitrate before the VideoBitrateAllocator, i.e.
+    // the `target_bitrate` argument of the OnBitrateUpdated() method. It is
+    // needed because the bitrate allocator may truncate the total bitrate, so a
+    // later call to the same allocator instance, e.g. using
+    // `last_encoder_rate_settings_->bitrate.get_sum_bps()`, may trick it into
+    // thinking the available bitrate has decreased since the last call.
+ DataRate encoder_target;
+ DataRate stable_encoder_target;
+ };
+
+ class DegradationPreferenceManager;
+
+ void ReconfigureEncoder() RTC_RUN_ON(&encoder_queue_);
+ void OnEncoderSettingsChanged() RTC_RUN_ON(&encoder_queue_);
+ void OnFrame(Timestamp post_time,
+ int frames_scheduled_for_processing,
+ const VideoFrame& video_frame);
+ void OnDiscardedFrame();
+ void RequestRefreshFrame();
+
+ void MaybeEncodeVideoFrame(const VideoFrame& frame,
+ int64_t time_when_posted_in_ms);
+
+ void EncodeVideoFrame(const VideoFrame& frame,
+ int64_t time_when_posted_in_ms);
+ // Indicates whether frame should be dropped because the pixel count is too
+ // large for the current bitrate configuration.
+ bool DropDueToSize(uint32_t pixel_count) const RTC_RUN_ON(&encoder_queue_);
+
+ // Implements EncodedImageCallback.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override;
+
+ void OnDroppedFrame(EncodedImageCallback::DropReason reason) override;
+
+ bool EncoderPaused() const;
+ void TraceFrameDropStart();
+ void TraceFrameDropEnd();
+
+ // Returns a copy of `rate_settings` with the `bitrate` field updated using
+ // the current VideoBitrateAllocator.
+ EncoderRateSettings UpdateBitrateAllocation(
+ const EncoderRateSettings& rate_settings) RTC_RUN_ON(&encoder_queue_);
+
+ uint32_t GetInputFramerateFps() RTC_RUN_ON(&encoder_queue_);
+ void SetEncoderRates(const EncoderRateSettings& rate_settings)
+ RTC_RUN_ON(&encoder_queue_);
+
+ void RunPostEncode(const EncodedImage& encoded_image,
+ int64_t time_sent_us,
+ int temporal_index,
+ DataSize frame_size);
+ void ReleaseEncoder() RTC_RUN_ON(&encoder_queue_);
+ // After calling this function `resource_adaptation_processor_` will be null.
+ void ShutdownResourceAdaptationQueue();
+
+ void CheckForAnimatedContent(const VideoFrame& frame,
+ int64_t time_when_posted_in_ms)
+ RTC_RUN_ON(&encoder_queue_);
+
+ void RequestEncoderSwitch() RTC_RUN_ON(&encoder_queue_);
+
+ // Augments an EncodedImage received from an encoder with parsable
+ // information.
+ EncodedImage AugmentEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info);
+
+ const FieldTrialsView& field_trials_;
+ TaskQueueBase* const worker_queue_;
+
+ const int number_of_cores_;
+
+ EncoderSink* sink_;
+ const VideoStreamEncoderSettings settings_;
+ const BitrateAllocationCallbackType allocation_cb_type_;
+ const RateControlSettings rate_control_settings_;
+
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface* const
+ encoder_selector_from_constructor_;
+ std::unique_ptr<VideoEncoderFactory::EncoderSelectorInterface> const
+ encoder_selector_from_factory_;
+ // Pointing to either encoder_selector_from_constructor_ or
+ // encoder_selector_from_factory_ but can be nullptr.
+ VideoEncoderFactory::EncoderSelectorInterface* const encoder_selector_;
+
+ VideoStreamEncoderObserver* const encoder_stats_observer_;
+ // Adapter that avoids public inheritance of the cadence adapter's callback
+ // interface.
+ CadenceCallback cadence_callback_;
+ // Frame cadence encoder adapter. Frames enter this adapter first, and it then
+ // forwards them to our OnFrame method.
+ std::unique_ptr<FrameCadenceAdapterInterface> frame_cadence_adapter_
+ RTC_GUARDED_BY(&encoder_queue_) RTC_PT_GUARDED_BY(&encoder_queue_);
+
+ VideoEncoderConfig encoder_config_ RTC_GUARDED_BY(&encoder_queue_);
+ std::unique_ptr<VideoEncoder> encoder_ RTC_GUARDED_BY(&encoder_queue_)
+ RTC_PT_GUARDED_BY(&encoder_queue_);
+ bool encoder_initialized_;
+ std::unique_ptr<VideoBitrateAllocator> rate_allocator_
+ RTC_GUARDED_BY(&encoder_queue_) RTC_PT_GUARDED_BY(&encoder_queue_);
+ int max_framerate_ RTC_GUARDED_BY(&encoder_queue_);
+
+  // Set when ConfigureEncoder has been called in order to lazily reconfigure
+  // the encoder on the next frame.
+ bool pending_encoder_reconfiguration_ RTC_GUARDED_BY(&encoder_queue_);
+ // Set when configuration must create a new encoder object, e.g.,
+ // because of a codec change.
+ bool pending_encoder_creation_ RTC_GUARDED_BY(&encoder_queue_);
+ absl::InlinedVector<SetParametersCallback, 2> encoder_configuration_callbacks_
+ RTC_GUARDED_BY(&encoder_queue_);
+
+ absl::optional<VideoFrameInfo> last_frame_info_
+ RTC_GUARDED_BY(&encoder_queue_);
+ int crop_width_ RTC_GUARDED_BY(&encoder_queue_);
+ int crop_height_ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<uint32_t> encoder_target_bitrate_bps_
+ RTC_GUARDED_BY(&encoder_queue_);
+ size_t max_data_payload_length_ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<EncoderRateSettings> last_encoder_rate_settings_
+ RTC_GUARDED_BY(&encoder_queue_);
+ bool encoder_paused_and_dropped_frame_ RTC_GUARDED_BY(&encoder_queue_);
+
+ // Set to true if at least one frame was sent to encoder since last encoder
+ // initialization.
+ bool was_encode_called_since_last_initialization_
+ RTC_GUARDED_BY(&encoder_queue_);
+
+ bool encoder_failed_ RTC_GUARDED_BY(&encoder_queue_);
+ Clock* const clock_;
+
+ // Used to make sure incoming time stamp is increasing for every frame.
+ int64_t last_captured_timestamp_ RTC_GUARDED_BY(&encoder_queue_);
+ // Delta used for translating between NTP and internal timestamps.
+ const int64_t delta_ntp_internal_ms_ RTC_GUARDED_BY(&encoder_queue_);
+
+ int64_t last_frame_log_ms_ RTC_GUARDED_BY(&encoder_queue_);
+ int captured_frame_count_ RTC_GUARDED_BY(&encoder_queue_);
+ int dropped_frame_cwnd_pushback_count_ RTC_GUARDED_BY(&encoder_queue_);
+ int dropped_frame_encoder_block_count_ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<VideoFrame> pending_frame_ RTC_GUARDED_BY(&encoder_queue_);
+ int64_t pending_frame_post_time_us_ RTC_GUARDED_BY(&encoder_queue_);
+
+ VideoFrame::UpdateRect accumulated_update_rect_
+ RTC_GUARDED_BY(&encoder_queue_);
+ bool accumulated_update_rect_is_valid_ RTC_GUARDED_BY(&encoder_queue_);
+
+ // Used for automatic content type detection.
+ absl::optional<VideoFrame::UpdateRect> last_update_rect_
+ RTC_GUARDED_BY(&encoder_queue_);
+ Timestamp animation_start_time_ RTC_GUARDED_BY(&encoder_queue_);
+ bool cap_resolution_due_to_video_content_ RTC_GUARDED_BY(&encoder_queue_);
+  // Used to correctly ignore changes in update_rect introduced by a resize
+  // triggered by animation detection.
+ enum class ExpectResizeState {
+ kNoResize, // Normal operation.
+ kResize, // Resize was triggered by the animation detection.
+ kFirstFrameAfterResize // Resize observed.
+ } expect_resize_state_ RTC_GUARDED_BY(&encoder_queue_);
+
+ FecControllerOverride* fec_controller_override_
+ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<int64_t> last_parameters_update_ms_
+ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<int64_t> last_encode_info_ms_ RTC_GUARDED_BY(&encoder_queue_);
+
+ VideoEncoder::EncoderInfo encoder_info_ RTC_GUARDED_BY(&encoder_queue_);
+ VideoCodec send_codec_ RTC_GUARDED_BY(&encoder_queue_);
+
+ FrameDropper frame_dropper_ RTC_GUARDED_BY(&encoder_queue_);
+ // If frame dropper is not force disabled, frame dropping might still be
+ // disabled if VideoEncoder::GetEncoderInfo() indicates that the encoder has a
+ // trusted rate controller. This is determined on a per-frame basis, as the
+ // encoder behavior might dynamically change.
+ bool force_disable_frame_dropper_ RTC_GUARDED_BY(&encoder_queue_);
+ // Incremented on worker thread whenever `frame_dropper_` determines that a
+ // frame should be dropped. Decremented on whichever thread runs
+ // OnEncodedImage(), which is only called by one thread but not necessarily
+ // the worker thread.
+ std::atomic<int> pending_frame_drops_;
+
+ // Congestion window frame drop ratio (drop 1 in every
+ // cwnd_frame_drop_interval_ frames).
+ absl::optional<int> cwnd_frame_drop_interval_ RTC_GUARDED_BY(&encoder_queue_);
+ // Frame counter for congestion window frame drop.
+ int cwnd_frame_counter_ RTC_GUARDED_BY(&encoder_queue_);
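+  // Illustrative example (an assumption about how the interval is applied,
+  // following the "drop 1 in every cwnd_frame_drop_interval_ frames" comment
+  // above): an interval of 5 drops roughly one frame in five, i.e. about 20%
+  // of frames, under congestion window pushback.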
+
+ std::unique_ptr<EncoderBitrateAdjuster> bitrate_adjuster_
+ RTC_GUARDED_BY(&encoder_queue_);
+
+  // TODO(sprang): Change to actually support keyframe per simulcast stream, or
+  // turn this into a simple bool `pending_keyframe_request_`.
+ std::vector<VideoFrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
+
+ FrameEncodeMetadataWriter frame_encode_metadata_writer_;
+
+ // Experiment groups parsed from field trials for realtime video ([0]) and
+ // screenshare ([1]). 0 means no group specified. Positive values are
+ // experiment group numbers incremented by 1.
+ const std::array<uint8_t, 2> experiment_groups_;
+
+ struct AutomaticAnimationDetectionExperiment {
+ bool enabled = false;
+ int min_duration_ms = 2000;
+ double min_area_ratio = 0.8;
+ int min_fps = 10;
+ std::unique_ptr<StructParametersParser> Parser() {
+ return StructParametersParser::Create(
+ "enabled", &enabled, //
+ "min_duration_ms", &min_duration_ms, //
+ "min_area_ratio", &min_area_ratio, //
+ "min_fps", &min_fps);
+ }
+ };
+
+ AutomaticAnimationDetectionExperiment
+ ParseAutomatincAnimationDetectionFieldTrial() const;
+
+ AutomaticAnimationDetectionExperiment
+ automatic_animation_detection_experiment_ RTC_GUARDED_BY(&encoder_queue_);
+
+ // Provides video stream input states: current resolution and frame rate.
+ VideoStreamInputStateProvider input_state_provider_;
+
+ const std::unique_ptr<VideoStreamAdapter> video_stream_adapter_
+ RTC_GUARDED_BY(&encoder_queue_);
+ // Responsible for adapting input resolution or frame rate to ensure resources
+ // (e.g. CPU or bandwidth) are not overused. Adding resources can occur on any
+ // thread.
+ std::unique_ptr<ResourceAdaptationProcessorInterface>
+ resource_adaptation_processor_ RTC_GUARDED_BY(&encoder_queue_);
+ std::unique_ptr<DegradationPreferenceManager> degradation_preference_manager_
+ RTC_GUARDED_BY(&encoder_queue_);
+ std::vector<AdaptationConstraint*> adaptation_constraints_
+ RTC_GUARDED_BY(&encoder_queue_);
+ // Handles input, output and stats reporting related to VideoStreamEncoder
+ // specific resources, such as "encode usage percent" measurements and "QP
+ // scaling". Also involved with various mitigations such as initial frame
+ // dropping.
+ // The manager primarily operates on the `encoder_queue_` but its lifetime is
+ // tied to the VideoStreamEncoder (which is destroyed off the encoder queue)
+ // and its resource list is accessible from any thread.
+ VideoStreamEncoderResourceManager stream_resource_manager_
+ RTC_GUARDED_BY(&encoder_queue_);
+ std::vector<rtc::scoped_refptr<Resource>> additional_resources_
+ RTC_GUARDED_BY(&encoder_queue_);
+ // Carries out the VideoSourceRestrictions provided by the
+ // ResourceAdaptationProcessor, i.e. reconfigures the source of video frames
+ // to provide us with different resolution or frame rate.
+ // This class is thread-safe.
+ VideoSourceSinkController video_source_sink_controller_
+ RTC_GUARDED_BY(worker_queue_);
+
+ // Default bitrate limits in EncoderInfoSettings allowed.
+ const bool default_limits_allowed_;
+
+ // QP parser is used to extract QP value from encoded frame when that is not
+ // provided by encoder.
+ QpParser qp_parser_;
+ const bool qp_parsing_allowed_;
+
+ // Enables encoder switching on initialization failures.
+ bool switch_encoder_on_init_failures_;
+
+ const absl::optional<int> vp9_low_tier_core_threshold_;
+
+ // These are copies of restrictions (glorified max_pixel_count) set by
+ // a) OnVideoSourceRestrictionsUpdated
+ // b) CheckForAnimatedContent
+ // They are used to scale down encoding resolution if needed when using
+ // requested_resolution.
+ //
+  // TODO(webrtc:14451) Split video_source_sink_controller_
+  // so that ownership of restrictions/wants is kept on &encoder_queue_ and
+  // these extra copies are not needed.
+ absl::optional<VideoSourceRestrictions> latest_restrictions_
+ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<VideoSourceRestrictions> animate_restrictions_
+ RTC_GUARDED_BY(&encoder_queue_);
+
+  // Used to cancel any potentially pending tasks posted to the worker thread.
+  // Referenced by tasks running on `encoder_queue_`, so it needs to be
+  // destroyed after stopping that queue. Must be created and destroyed on
+  // `worker_queue_`.
+ ScopedTaskSafety task_safety_;
+
+ // Public methods are proxied to the task queues. The queues must be destroyed
+ // first to make sure no tasks run that use other members.
+ rtc::TaskQueue encoder_queue_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_ENCODER_H_
diff --git a/third_party/libwebrtc/video/video_stream_encoder_impl_gn/moz.build b/third_party/libwebrtc/video/video_stream_encoder_impl_gn/moz.build
new file mode 100644
index 0000000000..cd70bcde35
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder_impl_gn/moz.build
@@ -0,0 +1,238 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/video/alignment_adjuster.cc",
+ "/third_party/libwebrtc/video/encoder_bitrate_adjuster.cc",
+ "/third_party/libwebrtc/video/encoder_overshoot_detector.cc",
+ "/third_party/libwebrtc/video/frame_encode_metadata_writer.cc",
+ "/third_party/libwebrtc/video/video_source_sink_controller.cc",
+ "/third_party/libwebrtc/video/video_stream_encoder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_stream_encoder_impl_gn")
diff --git a/third_party/libwebrtc/video/video_stream_encoder_interface.h b/third_party/libwebrtc/video/video_stream_encoder_interface.h
new file mode 100644
index 0000000000..25190aa474
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder_interface.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_ENCODER_INTERFACE_H_
+#define VIDEO_VIDEO_STREAM_ENCODER_INTERFACE_H_
+
+#include <vector>
+
+#include "api/adaptation/resource.h"
+#include "api/fec_controller_override.h"
+#include "api/rtc_error.h"
+#include "api/rtp_parameters.h" // For DegradationPreference.
+#include "api/rtp_sender_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/units/data_rate.h"
+#include "api/video/video_bitrate_allocator.h"
+#include "api/video/video_layers_allocation.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "api/video_codecs/video_encoder.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+// This interface represents a class responsible for creating and driving the
+// encoder(s) for a single video stream. It is also responsible for adaptation
+// decisions related to video quality, requesting reduced frame rate or
+// resolution from the VideoSource when needed.
+// TODO(bugs.webrtc.org/8830): This interface is under development. Changes
+// under consideration include:
+//
+// 1. Taking out responsibility for adaptation decisions, instead only reporting
+// per-frame measurements to the decision maker.
+//
+// 2. Moving responsibility for simulcast and for software fallback into this
+// class.
+class VideoStreamEncoderInterface {
+ public:
+ // Interface for receiving encoded video frames and notifications about
+ // configuration changes.
+ class EncoderSink : public EncodedImageCallback {
+ public:
+ virtual void OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ bool is_svc,
+ VideoEncoderConfig::ContentType content_type,
+ int min_transmit_bitrate_bps) = 0;
+
+ virtual void OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& allocation) = 0;
+
+ virtual void OnVideoLayersAllocationUpdated(
+ VideoLayersAllocation allocation) = 0;
+ };
+
+ virtual ~VideoStreamEncoderInterface() = default;
+
+ // If the resource is overusing, the VideoStreamEncoder will try to reduce
+ // resolution or frame rate until no resource is overusing.
+ // TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor
+  // is moved to Call, this method could be deleted altogether in favor of
+ // Call-level APIs only.
+ virtual void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) = 0;
+ virtual std::vector<rtc::scoped_refptr<Resource>>
+ GetAdaptationResources() = 0;
+
+ // Sets the source that will provide video frames to the VideoStreamEncoder's
+  // OnFrame method. `degradation_preference` controls whether resolution
+ // or frame rate may be reduced. The VideoStreamEncoder registers itself with
+ // `source`, and signals adaptation decisions to the source in the form of
+ // VideoSinkWants.
+ // TODO(bugs.webrtc.org/14246): When adaptation logic is extracted from this
+ // class, it no longer needs to know the source.
+ virtual void SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const DegradationPreference& degradation_preference) = 0;
+
+ // Sets the `sink` that gets the encoded frames. `rotation_applied` means
+ // that the source must support rotation. Only set `rotation_applied` if the
+ // remote side does not support the rotation extension.
+ virtual void SetSink(EncoderSink* sink, bool rotation_applied) = 0;
+
+  // Sets an initial bitrate, later overridden by OnBitrateUpdated. Mainly
+ // affects the resolution of the initial key frame: If incoming frames are
+ // larger than reasonable for the start bitrate, and scaling is enabled,
+ // VideoStreamEncoder asks the source to scale down and drops a few initial
+ // frames.
+ // TODO(nisse): This is a poor interface, and mixes bandwidth estimation and
+ // codec configuration in an undesired way. For the actual send bandwidth, we
+ // should always be somewhat conservative, but we may nevertheless want to let
+ // the application configure a more optimistic quality for the initial
+ // resolution. Should be replaced by a construction time setting.
+ virtual void SetStartBitrate(int start_bitrate_bps) = 0;
+
+ // Request a key frame. Used for signalling from the remote receiver with
+ // no arguments and for RTCRtpSender.generateKeyFrame with a list of
+ // rids/layers.
+ virtual void SendKeyFrame(const std::vector<VideoFrameType>& layers = {}) = 0;
+
+ // Inform the encoder that a loss has occurred.
+ virtual void OnLossNotification(
+ const VideoEncoder::LossNotification& loss_notification) = 0;
+
+ // Set the currently estimated network properties. A `target_bitrate`
+ // of zero pauses the encoder.
+ // `stable_target_bitrate` is a filtered version of `target_bitrate`. It is
+  // always less than or equal to it. It can be used to avoid rapid changes of
+ // expensive encoding settings, such as resolution.
+ // `link_allocation` is the bandwidth available for this video stream on the
+ // network link. It is always at least `target_bitrate` but may be higher
+ // if we are not network constrained.
+ virtual void OnBitrateUpdated(DataRate target_bitrate,
+ DataRate stable_target_bitrate,
+ DataRate link_allocation,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms,
+ double cwnd_reduce_ratio) = 0;
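+  // Illustrative values consistent with the comment above (not an additional
+  // API contract): stable_target_bitrate <= target_bitrate <= link_allocation,
+  // e.g. 450 kbps stable, 500 kbps target, and 600 kbps link allocation when
+  // the network has headroom.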
+
+ // Set a FecControllerOverride, through which the encoder may override
+ // decisions made by FecController.
+ virtual void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) = 0;
+
+ // Creates and configures an encoder with the given `config`. The
+ // `max_data_payload_length` is used to support single NAL unit
+ // packetization for H.264.
+ virtual void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length) = 0;
+ virtual void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ SetParametersCallback callback) = 0;
+
+ // Permanently stop encoding. After this method has returned, it is
+ // guaranteed that no encoded frames will be delivered to the sink.
+ virtual void Stop() = 0;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_ENCODER_INTERFACE_H_
diff --git a/third_party/libwebrtc/video/video_stream_encoder_interface_gn/moz.build b/third_party/libwebrtc/video/video_stream_encoder_interface_gn/moz.build
new file mode 100644
index 0000000000..e058513d4f
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder_interface_gn/moz.build
@@ -0,0 +1,209 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_stream_encoder_interface_gn")
diff --git a/third_party/libwebrtc/video/video_stream_encoder_observer.h b/third_party/libwebrtc/video/video_stream_encoder_observer.h
new file mode 100644
index 0000000000..c10412181d
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder_observer.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_ENCODER_OBSERVER_H_
+#define VIDEO_VIDEO_STREAM_ENCODER_OBSERVER_H_
+
+#include <string>
+#include <vector>
+
+#include "api/video/video_adaptation_counters.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+// TODO(nisse): Used for the OnSendEncodedImage callback below. The callback
+// wants metadata such as size, encode timing, qp, but doesn't need actual
+// encoded data. So use some other type to represent that.
+class EncodedImage;
+
+struct EncoderImplementation {
+ const std::string& name;
+ bool is_hardware_accelerated;
+};
+
+// Broken out into a base class, with public inheritance below, only to ease
+// unit testing of the internal class OveruseFrameDetector.
+class CpuOveruseMetricsObserver {
+ public:
+ virtual ~CpuOveruseMetricsObserver() = default;
+ virtual void OnEncodedFrameTimeMeasured(int encode_duration_ms,
+ int encode_usage_percent) = 0;
+};
+
+class VideoStreamEncoderObserver : public CpuOveruseMetricsObserver {
+ public:
+ struct AdaptationSettings {
+ AdaptationSettings()
+ : resolution_scaling_enabled(false), framerate_scaling_enabled(false) {}
+
+ AdaptationSettings(bool resolution_scaling_enabled,
+ bool framerate_scaling_enabled)
+ : resolution_scaling_enabled(resolution_scaling_enabled),
+ framerate_scaling_enabled(framerate_scaling_enabled) {}
+
+ bool resolution_scaling_enabled;
+ bool framerate_scaling_enabled;
+ };
+
+ enum class DropReason {
+ kSource,
+ kEncoderQueue,
+ kEncoder,
+ kMediaOptimization,
+ kCongestionWindow
+ };
+
+ ~VideoStreamEncoderObserver() override = default;
+
+ virtual void OnIncomingFrame(int width, int height) = 0;
+
+ // TODO(bugs.webrtc.org/8504): Merge into one callback per encoded frame.
+ using CpuOveruseMetricsObserver::OnEncodedFrameTimeMeasured;
+ virtual void OnSendEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_info) = 0;
+
+ virtual void OnEncoderImplementationChanged(
+ EncoderImplementation implementation) = 0;
+
+ virtual void OnFrameDropped(DropReason reason) = 0;
+
+  // Used to indicate a change in content type, which may require a change in
+  // how stats are collected, and to set the configured preferred media bitrate.
+ virtual void OnEncoderReconfigured(
+ const VideoEncoderConfig& encoder_config,
+ const std::vector<VideoStream>& streams) = 0;
+
+ virtual void OnAdaptationChanged(
+ VideoAdaptationReason reason,
+ const VideoAdaptationCounters& cpu_steps,
+ const VideoAdaptationCounters& quality_steps) = 0;
+ virtual void ClearAdaptationStats() = 0;
+
+ virtual void UpdateAdaptationSettings(
+ AdaptationSettings cpu_settings,
+ AdaptationSettings quality_settings) = 0;
+ virtual void OnMinPixelLimitReached() = 0;
+ virtual void OnInitialQualityResolutionAdaptDown() = 0;
+
+ virtual void OnSuspendChange(bool is_suspended) = 0;
+
+ virtual void OnBitrateAllocationUpdated(
+ const VideoCodec& codec,
+ const VideoBitrateAllocation& allocation) {}
+
+  // Informs the observer whether an internal encoder scaler has reduced the
+  // video resolution. `is_scaled` is a flag indicating if the video is scaled
+  // down.
+ virtual void OnEncoderInternalScalerUpdate(bool is_scaled) {}
+
+ // TODO(bugs.webrtc.org/14246): VideoStreamEncoder wants to query the stats,
+ // which makes this not a pure observer. GetInputFrameRate is needed for the
+ // cpu adaptation, so can be deleted if that responsibility is moved out to a
+ // VideoStreamAdaptor class.
+ virtual int GetInputFrameRate() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_ENCODER_OBSERVER_H_
diff --git a/third_party/libwebrtc/video/video_stream_encoder_unittest.cc b/third_party/libwebrtc/video/video_stream_encoder_unittest.cc
new file mode 100644
index 0000000000..cdd4c75ab7
--- /dev/null
+++ b/third_party/libwebrtc/video/video_stream_encoder_unittest.cc
@@ -0,0 +1,9528 @@
+
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_stream_encoder.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_parameters.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/test/mock_fec_controller_override.h"
+#include "api/test/mock_video_encoder.h"
+#include "api/test/mock_video_encoder_factory.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/nv12_buffer.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp8_temporal_layers.h"
+#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "call/adaptation/test/fake_adaptation_constraint.h"
+#include "call/adaptation/test/fake_resource.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "media/base/video_adapter.h"
+#include "media/engine/webrtc_video_engine.h"
+#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/codecs/vp9/svc_config.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "modules/video_coding/utility/vp8_constants.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/encoder_settings.h"
+#include "test/fake_encoder.h"
+#include "test/frame_forwarder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mappable_native_buffer.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "test/video_encoder_nullable_proxy_factory.h"
+#include "test/video_encoder_proxy_factory.h"
+#include "video/config/encoder_stream_factory.h"
+#include "video/frame_cadence_adapter.h"
+#include "video/send_statistics_proxy.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Ge;
+using ::testing::Gt;
+using ::testing::Invoke;
+using ::testing::Le;
+using ::testing::Lt;
+using ::testing::Matcher;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::Optional;
+using ::testing::Return;
+using ::testing::SizeIs;
+using ::testing::StrictMock;
+
+namespace {
+const int kMinPixelsPerFrame = 320 * 180;
+const int kQpLow = 1;
+const int kQpHigh = 2;
+const int kMinFramerateFps = 2;
+const int kMinBalancedFramerateFps = 7;
+constexpr TimeDelta kFrameTimeout = TimeDelta::Millis(100);
+const size_t kMaxPayloadLength = 1440;
+const DataRate kTargetBitrate = DataRate::KilobitsPerSec(1000);
+const DataRate kLowTargetBitrate = DataRate::KilobitsPerSec(100);
+const DataRate kStartBitrate = DataRate::KilobitsPerSec(600);
+const DataRate kSimulcastTargetBitrate = DataRate::KilobitsPerSec(3150);
+const int kMaxInitialFramedrop = 4;
+const int kDefaultFramerate = 30;
+const int64_t kFrameIntervalMs = rtc::kNumMillisecsPerSec / kDefaultFramerate;
+const int64_t kProcessIntervalMs = 1000;
+const VideoEncoder::ResolutionBitrateLimits
+ kEncoderBitrateLimits540p(960 * 540, 100 * 1000, 100 * 1000, 2000 * 1000);
+const VideoEncoder::ResolutionBitrateLimits
+ kEncoderBitrateLimits720p(1280 * 720, 200 * 1000, 200 * 1000, 4000 * 1000);
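+// The four ResolutionBitrateLimits arguments above are, in order (assumed
+// from the api/video_codecs definition): frame_size_pixels,
+// min_start_bitrate_bps, min_bitrate_bps, max_bitrate_bps.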
+
+uint8_t kOptimalSps[] = {0, 0, 0, 1, H264::NaluType::kSps,
+ 0x00, 0x00, 0x03, 0x03, 0xF4,
+ 0x05, 0x03, 0xC7, 0xE0, 0x1B,
+ 0x41, 0x10, 0x8D, 0x00};
+
+const uint8_t kCodedFrameVp8Qp25[] = {
+ 0x10, 0x02, 0x00, 0x9d, 0x01, 0x2a, 0x10, 0x00, 0x10, 0x00,
+ 0x02, 0x47, 0x08, 0x85, 0x85, 0x88, 0x85, 0x84, 0x88, 0x0c,
+ 0x82, 0x00, 0x0c, 0x0d, 0x60, 0x00, 0xfe, 0xfc, 0x5c, 0xd0};
+
+VideoFrame CreateSimpleNV12Frame() {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(rtc::make_ref_counted<NV12Buffer>(
+ /*width=*/16, /*height=*/16))
+ .build();
+}
+
+void PassAFrame(
+ TaskQueueBase* encoder_queue,
+ FrameCadenceAdapterInterface::Callback* video_stream_encoder_callback,
+ int64_t ntp_time_ms) {
+ encoder_queue->PostTask([video_stream_encoder_callback, ntp_time_ms] {
+ video_stream_encoder_callback->OnFrame(Timestamp::Millis(ntp_time_ms), 1,
+ CreateSimpleNV12Frame());
+ });
+}
+
+class TestBuffer : public webrtc::I420Buffer {
+ public:
+ TestBuffer(rtc::Event* event, int width, int height)
+ : I420Buffer(width, height), event_(event) {}
+
+ private:
+ friend class rtc::RefCountedObject<TestBuffer>;
+ ~TestBuffer() override {
+ if (event_)
+ event_->Set();
+ }
+ rtc::Event* const event_;
+};
+
+// A fake native buffer that can't be converted to I420. Upon scaling, it
+// produces another FakeNativeBuffer.
+class FakeNativeBuffer : public webrtc::VideoFrameBuffer {
+ public:
+ FakeNativeBuffer(rtc::Event* event, int width, int height)
+ : event_(event), width_(width), height_(height) {}
+ webrtc::VideoFrameBuffer::Type type() const override { return Type::kNative; }
+ int width() const override { return width_; }
+ int height() const override { return height_; }
+ rtc::scoped_refptr<webrtc::I420BufferInterface> ToI420() override {
+ return nullptr;
+ }
+ rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(
+ int offset_x,
+ int offset_y,
+ int crop_width,
+ int crop_height,
+ int scaled_width,
+ int scaled_height) override {
+ return rtc::make_ref_counted<FakeNativeBuffer>(nullptr, scaled_width,
+ scaled_height);
+ }
+
+ private:
+ friend class rtc::RefCountedObject<FakeNativeBuffer>;
+ ~FakeNativeBuffer() override {
+ if (event_)
+ event_->Set();
+ }
+ rtc::Event* const event_;
+ const int width_;
+ const int height_;
+};
+
+// A fake native buffer that is backed by an NV12 buffer.
+class FakeNV12NativeBuffer : public webrtc::VideoFrameBuffer {
+ public:
+ FakeNV12NativeBuffer(rtc::Event* event, int width, int height)
+ : nv12_buffer_(NV12Buffer::Create(width, height)), event_(event) {}
+
+ webrtc::VideoFrameBuffer::Type type() const override { return Type::kNative; }
+ int width() const override { return nv12_buffer_->width(); }
+ int height() const override { return nv12_buffer_->height(); }
+ rtc::scoped_refptr<webrtc::I420BufferInterface> ToI420() override {
+ return nv12_buffer_->ToI420();
+ }
+ rtc::scoped_refptr<VideoFrameBuffer> GetMappedFrameBuffer(
+ rtc::ArrayView<VideoFrameBuffer::Type> types) override {
+ if (absl::c_find(types, Type::kNV12) != types.end()) {
+ return nv12_buffer_;
+ }
+ return nullptr;
+ }
+ const NV12BufferInterface* GetNV12() const { return nv12_buffer_.get(); }
+
+ private:
+ friend class rtc::RefCountedObject<FakeNV12NativeBuffer>;
+ ~FakeNV12NativeBuffer() override {
+ if (event_)
+ event_->Set();
+ }
+ rtc::scoped_refptr<NV12Buffer> nv12_buffer_;
+ rtc::Event* const event_;
+};
+
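+// OveruseFrameDetector wrapper that records the last target framerate passed
+// to it and signals an event on every update.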
+class CpuOveruseDetectorProxy : public OveruseFrameDetector {
+ public:
+ CpuOveruseDetectorProxy(CpuOveruseMetricsObserver* metrics_observer,
+ const FieldTrialsView& field_trials)
+ : OveruseFrameDetector(metrics_observer, field_trials),
+ last_target_framerate_fps_(-1),
+ framerate_updated_event_(true /* manual_reset */,
+ false /* initially_signaled */) {}
+ virtual ~CpuOveruseDetectorProxy() {}
+
+ void OnTargetFramerateUpdated(int framerate_fps) override {
+ MutexLock lock(&lock_);
+ last_target_framerate_fps_ = framerate_fps;
+ OveruseFrameDetector::OnTargetFramerateUpdated(framerate_fps);
+ framerate_updated_event_.Set();
+ }
+
+ int GetLastTargetFramerate() {
+ MutexLock lock(&lock_);
+ return last_target_framerate_fps_;
+ }
+
+ CpuOveruseOptions GetOptions() { return options_; }
+
+ rtc::Event* framerate_updated_event() { return &framerate_updated_event_; }
+
+ private:
+ Mutex lock_;
+ int last_target_framerate_fps_ RTC_GUARDED_BY(lock_);
+ rtc::Event framerate_updated_event_;
+};
+
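+// Restrictions listener that flags and signals when restrictions are updated,
+// and DCHECKs on destruction that an update was observed.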
+class FakeVideoSourceRestrictionsListener
+ : public VideoSourceRestrictionsListener {
+ public:
+ FakeVideoSourceRestrictionsListener()
+ : was_restrictions_updated_(false), restrictions_updated_event_() {}
+ ~FakeVideoSourceRestrictionsListener() override {
+ RTC_DCHECK(was_restrictions_updated_);
+ }
+
+ rtc::Event* restrictions_updated_event() {
+ return &restrictions_updated_event_;
+ }
+
+ // VideoSourceRestrictionsListener implementation.
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) override {
+ was_restrictions_updated_ = true;
+ restrictions_updated_event_.Set();
+ }
+
+ private:
+ bool was_restrictions_updated_;
+ rtc::Event restrictions_updated_event_;
+};
+
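+// gMock matchers over rtc::VideoSinkWants, used to express expectations such
+// as EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax()).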
+auto WantsFps(Matcher<int> fps_matcher) {
+ return Field("max_framerate_fps", &rtc::VideoSinkWants::max_framerate_fps,
+ fps_matcher);
+}
+
+auto WantsMaxPixels(Matcher<int> max_pixel_matcher) {
+ return Field("max_pixel_count", &rtc::VideoSinkWants::max_pixel_count,
+ AllOf(max_pixel_matcher, Gt(0)));
+}
+
+auto ResolutionMax() {
+ return AllOf(
+ WantsMaxPixels(Eq(std::numeric_limits<int>::max())),
+ Field("target_pixel_count", &rtc::VideoSinkWants::target_pixel_count,
+ Eq(absl::nullopt)));
+}
+
+auto FpsMax() {
+ return WantsFps(Eq(kDefaultFramerate));
+}
+
+auto FpsUnlimited() {
+ return WantsFps(Eq(std::numeric_limits<int>::max()));
+}
+
+auto FpsMatchesResolutionMax(Matcher<int> fps_matcher) {
+ return AllOf(WantsFps(fps_matcher), ResolutionMax());
+}
+
+auto FpsMaxResolutionMatches(Matcher<int> pixel_matcher) {
+ return AllOf(FpsMax(), WantsMaxPixels(pixel_matcher));
+}
+
+auto FpsMaxResolutionMax() {
+ return AllOf(FpsMax(), ResolutionMax());
+}
+
+auto UnlimitedSinkWants() {
+ return AllOf(FpsUnlimited(), ResolutionMax());
+}
+
+auto FpsInRangeForPixelsInBalanced(int last_frame_pixels) {
+ Matcher<int> fps_range_matcher;
+
+ if (last_frame_pixels <= 320 * 240) {
+ fps_range_matcher = AllOf(Ge(7), Le(10));
+ } else if (last_frame_pixels <= 480 * 360) {
+ fps_range_matcher = AllOf(Ge(10), Le(15));
+ } else if (last_frame_pixels <= 640 * 480) {
+ fps_range_matcher = Ge(15);
+ } else {
+ fps_range_matcher = Eq(kDefaultFramerate);
+ }
+ return Field("max_framerate_fps", &rtc::VideoSinkWants::max_framerate_fps,
+ fps_range_matcher);
+}
+
+auto FpsEqResolutionEqTo(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Eq(other_wants.max_pixel_count)));
+}
+
+auto FpsMaxResolutionLt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(FpsMax(), WantsMaxPixels(Lt(other_wants.max_pixel_count)));
+}
+
+auto FpsMaxResolutionGt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(FpsMax(), WantsMaxPixels(Gt(other_wants.max_pixel_count)));
+}
+
+auto FpsLtResolutionEq(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Lt(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Eq(other_wants.max_pixel_count)));
+}
+
+auto FpsGtResolutionEq(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Gt(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Eq(other_wants.max_pixel_count)));
+}
+
+auto FpsEqResolutionLt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Lt(other_wants.max_pixel_count)));
+}
+
+auto FpsEqResolutionGt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Gt(other_wants.max_pixel_count)));
+}
+
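+// VideoStreamEncoder with injected fake CPU/quality resources and a fake
+// adaptation constraint, plus helpers to trigger adaptation and to
+// synchronize with the encoder queue under simulated time.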
+class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
+ public:
+ VideoStreamEncoderUnderTest(
+ TimeController* time_controller,
+ std::unique_ptr<FrameCadenceAdapterInterface> cadence_adapter,
+ std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter>
+ encoder_queue,
+ SendStatisticsProxy* stats_proxy,
+ const VideoStreamEncoderSettings& settings,
+ VideoStreamEncoder::BitrateAllocationCallbackType
+ allocation_callback_type,
+ const FieldTrialsView& field_trials,
+ int num_cores)
+ : VideoStreamEncoder(
+ time_controller->GetClock(),
+ num_cores,
+ stats_proxy,
+ settings,
+ std::unique_ptr<OveruseFrameDetector>(
+ overuse_detector_proxy_ =
+ new CpuOveruseDetectorProxy(stats_proxy, field_trials)),
+ std::move(cadence_adapter),
+ std::move(encoder_queue),
+ allocation_callback_type,
+ field_trials),
+ time_controller_(time_controller),
+ fake_cpu_resource_(FakeResource::Create("FakeResource[CPU]")),
+ fake_quality_resource_(FakeResource::Create("FakeResource[QP]")),
+ fake_adaptation_constraint_("FakeAdaptationConstraint") {
+ InjectAdaptationResource(fake_quality_resource_,
+ VideoAdaptationReason::kQuality);
+ InjectAdaptationResource(fake_cpu_resource_, VideoAdaptationReason::kCpu);
+ InjectAdaptationConstraint(&fake_adaptation_constraint_);
+ }
+
+ void SetSourceAndWaitForRestrictionsUpdated(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const DegradationPreference& degradation_preference) {
+ FakeVideoSourceRestrictionsListener listener;
+ AddRestrictionsListenerForTesting(&listener);
+ SetSource(source, degradation_preference);
+ listener.restrictions_updated_event()->Wait(TimeDelta::Seconds(5));
+ RemoveRestrictionsListenerForTesting(&listener);
+ }
+
+ void SetSourceAndWaitForFramerateUpdated(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const DegradationPreference& degradation_preference) {
+ overuse_detector_proxy_->framerate_updated_event()->Reset();
+ SetSource(source, degradation_preference);
+ overuse_detector_proxy_->framerate_updated_event()->Wait(
+ TimeDelta::Seconds(5));
+ }
+
+ void OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate target_bitrate,
+ DataRate stable_target_bitrate,
+ DataRate link_allocation,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms,
+ double cwnd_reduce_ratio) {
+ OnBitrateUpdated(target_bitrate, stable_target_bitrate, link_allocation,
+ fraction_lost, round_trip_time_ms, cwnd_reduce_ratio);
+ // Bitrate is updated on the encoder queue.
+ WaitUntilTaskQueueIsIdle();
+ }
+
+  // This is used as a synchronisation mechanism to make sure that the
+  // encoder queue is not blocked before we start sending it frames.
+ void WaitUntilTaskQueueIsIdle() {
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ }
+
+ // Triggers resource usage measurements on the fake CPU resource.
+ void TriggerCpuOveruse() {
+ rtc::Event event;
+ encoder_queue()->PostTask([this, &event] {
+ fake_cpu_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ event.Set();
+ });
+ ASSERT_TRUE(event.Wait(TimeDelta::Seconds(5)));
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ }
+
+ void TriggerCpuUnderuse() {
+ rtc::Event event;
+ encoder_queue()->PostTask([this, &event] {
+ fake_cpu_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ event.Set();
+ });
+ ASSERT_TRUE(event.Wait(TimeDelta::Seconds(5)));
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ }
+
+ // Triggers resource usage measurements on the fake quality resource.
+ void TriggerQualityLow() {
+ rtc::Event event;
+ encoder_queue()->PostTask([this, &event] {
+ fake_quality_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ event.Set();
+ });
+ ASSERT_TRUE(event.Wait(TimeDelta::Seconds(5)));
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ }
+ void TriggerQualityHigh() {
+ rtc::Event event;
+ encoder_queue()->PostTask([this, &event] {
+ fake_quality_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ event.Set();
+ });
+ ASSERT_TRUE(event.Wait(TimeDelta::Seconds(5)));
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ }
+
+ TimeController* const time_controller_;
+ CpuOveruseDetectorProxy* overuse_detector_proxy_;
+ rtc::scoped_refptr<FakeResource> fake_cpu_resource_;
+ rtc::scoped_refptr<FakeResource> fake_quality_resource_;
+ FakeAdaptationConstraint fake_adaptation_constraint_;
+};
+
+// Simulates simulcast behavior and makes highest stream resolutions divisible
+// by 4.
+class CroppingVideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ CroppingVideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams = test::CreateVideoStreams(
+ frame_width - frame_width % 4, frame_height - frame_height % 4,
+ encoder_config);
+ return streams;
+ }
+};
+
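+// FrameForwarder that can optionally adapt incoming frames to the current
+// sink wants (via cricket::VideoAdapter) before forwarding them.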
+class AdaptingFrameForwarder : public test::FrameForwarder {
+ public:
+ explicit AdaptingFrameForwarder(TimeController* time_controller)
+ : time_controller_(time_controller), adaptation_enabled_(false) {}
+ ~AdaptingFrameForwarder() override {}
+
+ void set_adaptation_enabled(bool enabled) {
+ MutexLock lock(&mutex_);
+ adaptation_enabled_ = enabled;
+ }
+
+ bool adaption_enabled() const {
+ MutexLock lock(&mutex_);
+ return adaptation_enabled_;
+ }
+
+ // The "last wants" is a snapshot of the previous rtc::VideoSinkWants where
+ // the resolution or frame rate was different than it is currently. If
+ // something else is modified, such as encoder resolutions, but the resolution
+ // and frame rate stays the same, last wants is not updated.
+ rtc::VideoSinkWants last_wants() const {
+ MutexLock lock(&mutex_);
+ return last_wants_;
+ }
+
+ absl::optional<int> last_sent_width() const { return last_width_; }
+ absl::optional<int> last_sent_height() const { return last_height_; }
+
+ void IncomingCapturedFrame(const VideoFrame& video_frame) override {
+ RTC_DCHECK(time_controller_->GetMainThread()->IsCurrent());
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+
+ int cropped_width = 0;
+ int cropped_height = 0;
+ int out_width = 0;
+ int out_height = 0;
+ if (adaption_enabled()) {
+ RTC_DLOG(LS_INFO) << "IncomingCapturedFrame: AdaptFrameResolution()"
+ << "w=" << video_frame.width()
+ << "h=" << video_frame.height();
+ if (adapter_.AdaptFrameResolution(
+ video_frame.width(), video_frame.height(),
+ video_frame.timestamp_us() * 1000, &cropped_width,
+ &cropped_height, &out_width, &out_height)) {
+ VideoFrame adapted_frame =
+ VideoFrame::Builder()
+ .set_video_frame_buffer(rtc::make_ref_counted<TestBuffer>(
+ nullptr, out_width, out_height))
+ .set_ntp_time_ms(video_frame.ntp_time_ms())
+ .set_timestamp_ms(99)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ if (video_frame.has_update_rect()) {
+ adapted_frame.set_update_rect(
+ video_frame.update_rect().ScaleWithFrame(
+ video_frame.width(), video_frame.height(), 0, 0,
+ video_frame.width(), video_frame.height(), out_width,
+ out_height));
+ }
+ test::FrameForwarder::IncomingCapturedFrame(adapted_frame);
+ last_width_.emplace(adapted_frame.width());
+ last_height_.emplace(adapted_frame.height());
+ } else {
+ last_width_ = absl::nullopt;
+ last_height_ = absl::nullopt;
+ }
+ } else {
+ RTC_DLOG(LS_INFO) << "IncomingCapturedFrame: adaptation not enabled";
+ test::FrameForwarder::IncomingCapturedFrame(video_frame);
+ last_width_.emplace(video_frame.width());
+ last_height_.emplace(video_frame.height());
+ }
+ }
+
+ void OnOutputFormatRequest(int width, int height) {
+ absl::optional<std::pair<int, int>> target_aspect_ratio =
+ std::make_pair(width, height);
+ absl::optional<int> max_pixel_count = width * height;
+ absl::optional<int> max_fps;
+ adapter_.OnOutputFormatRequest(target_aspect_ratio, max_pixel_count,
+ max_fps);
+ }
+
+ void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ MutexLock lock(&mutex_);
+ rtc::VideoSinkWants prev_wants = sink_wants_locked();
+ bool did_adapt =
+ prev_wants.max_pixel_count != wants.max_pixel_count ||
+ prev_wants.target_pixel_count != wants.target_pixel_count ||
+ prev_wants.max_framerate_fps != wants.max_framerate_fps;
+ if (did_adapt) {
+ last_wants_ = prev_wants;
+ }
+ adapter_.OnSinkWants(wants);
+ test::FrameForwarder::AddOrUpdateSinkLocked(sink, wants);
+ }
+
+ void RequestRefreshFrame() override { ++refresh_frames_requested_; }
+
+ TimeController* const time_controller_;
+ cricket::VideoAdapter adapter_;
+ bool adaptation_enabled_ RTC_GUARDED_BY(mutex_);
+ rtc::VideoSinkWants last_wants_ RTC_GUARDED_BY(mutex_);
+ absl::optional<int> last_width_;
+ absl::optional<int> last_height_;
+ int refresh_frames_requested_{0};
+};
+
+// TODO(nisse): Mock only VideoStreamEncoderObserver.
+class MockableSendStatisticsProxy : public SendStatisticsProxy {
+ public:
+ MockableSendStatisticsProxy(Clock* clock,
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type,
+ const FieldTrialsView& field_trials)
+ : SendStatisticsProxy(clock, config, content_type, field_trials) {}
+
+ VideoSendStream::Stats GetStats() override {
+ MutexLock lock(&lock_);
+ if (mock_stats_)
+ return *mock_stats_;
+ return SendStatisticsProxy::GetStats();
+ }
+
+ int GetInputFrameRate() const override {
+ MutexLock lock(&lock_);
+ if (mock_stats_)
+ return mock_stats_->input_frame_rate;
+ return SendStatisticsProxy::GetInputFrameRate();
+ }
+ void SetMockStats(const VideoSendStream::Stats& stats) {
+ MutexLock lock(&lock_);
+ mock_stats_.emplace(stats);
+ }
+
+ void ResetMockStats() {
+ MutexLock lock(&lock_);
+ mock_stats_.reset();
+ }
+
+ void SetDroppedFrameCallback(std::function<void(DropReason)> callback) {
+ on_frame_dropped_ = std::move(callback);
+ }
+
+ private:
+ void OnFrameDropped(DropReason reason) override {
+ SendStatisticsProxy::OnFrameDropped(reason);
+ if (on_frame_dropped_)
+ on_frame_dropped_(reason);
+ }
+
+ mutable Mutex lock_;
+ absl::optional<VideoSendStream::Stats> mock_stats_ RTC_GUARDED_BY(lock_);
+ std::function<void(DropReason)> on_frame_dropped_;
+};
+
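+// Creates minimally wired VideoStreamEncoder instances driven by simulated
+// time, backed by a mockable FakeEncoder and a null sink.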
+class SimpleVideoStreamEncoderFactory {
+ public:
+ class AdaptedVideoStreamEncoder : public VideoStreamEncoder {
+ public:
+ using VideoStreamEncoder::VideoStreamEncoder;
+ ~AdaptedVideoStreamEncoder() { Stop(); }
+ };
+
+ class MockFakeEncoder : public test::FakeEncoder {
+ public:
+ using FakeEncoder::FakeEncoder;
+ MOCK_METHOD(CodecSpecificInfo,
+ EncodeHook,
+ (EncodedImage & encoded_image,
+ rtc::scoped_refptr<EncodedImageBuffer> buffer),
+ (override));
+ };
+
+ SimpleVideoStreamEncoderFactory() {
+ encoder_settings_.encoder_factory = &encoder_factory_;
+ encoder_settings_.bitrate_allocator_factory =
+ bitrate_allocator_factory_.get();
+ }
+
+ std::unique_ptr<AdaptedVideoStreamEncoder> CreateWithEncoderQueue(
+ std::unique_ptr<FrameCadenceAdapterInterface> zero_hertz_adapter,
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> encoder_queue,
+ const FieldTrialsView* field_trials = nullptr) {
+ auto result = std::make_unique<AdaptedVideoStreamEncoder>(
+ time_controller_.GetClock(),
+ /*number_of_cores=*/1,
+ /*stats_proxy=*/stats_proxy_.get(), encoder_settings_,
+ std::make_unique<CpuOveruseDetectorProxy>(
+ /*stats_proxy=*/nullptr,
+ field_trials ? *field_trials : field_trials_),
+ std::move(zero_hertz_adapter), std::move(encoder_queue),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation,
+ field_trials ? *field_trials : field_trials_);
+ result->SetSink(&sink_, /*rotation_applied=*/false);
+ return result;
+ }
+
+ std::unique_ptr<AdaptedVideoStreamEncoder> Create(
+ std::unique_ptr<FrameCadenceAdapterInterface> zero_hertz_adapter,
+ TaskQueueBase** encoder_queue_ptr = nullptr) {
+ auto encoder_queue =
+ time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "EncoderQueue", TaskQueueFactory::Priority::NORMAL);
+ if (encoder_queue_ptr)
+ *encoder_queue_ptr = encoder_queue.get();
+ return CreateWithEncoderQueue(std::move(zero_hertz_adapter),
+ std::move(encoder_queue));
+ }
+
+ void DepleteTaskQueues() { time_controller_.AdvanceTime(TimeDelta::Zero()); }
+ MockFakeEncoder& GetMockFakeEncoder() { return mock_fake_encoder_; }
+
+ GlobalSimulatedTimeController* GetTimeController() {
+ return &time_controller_;
+ }
+
+ private:
+ class NullEncoderSink : public VideoStreamEncoderInterface::EncoderSink {
+ public:
+ ~NullEncoderSink() override = default;
+ void OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ bool is_svc,
+ VideoEncoderConfig::ContentType content_type,
+ int min_transmit_bitrate_bps) override {}
+ void OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& allocation) override {}
+ void OnVideoLayersAllocationUpdated(
+ VideoLayersAllocation allocation) override {}
+ Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ return Result(EncodedImageCallback::Result::OK);
+ }
+ };
+
+ test::ScopedKeyValueConfig field_trials_;
+ GlobalSimulatedTimeController time_controller_{Timestamp::Zero()};
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_{
+ time_controller_.CreateTaskQueueFactory()};
+ std::unique_ptr<MockableSendStatisticsProxy> stats_proxy_ =
+ std::make_unique<MockableSendStatisticsProxy>(
+ time_controller_.GetClock(),
+ VideoSendStream::Config(nullptr),
+ webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials_);
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory_ =
+ CreateBuiltinVideoBitrateAllocatorFactory();
+ VideoStreamEncoderSettings encoder_settings_{
+ VideoEncoder::Capabilities(/*loss_notification=*/false)};
+ MockFakeEncoder mock_fake_encoder_{time_controller_.GetClock()};
+ test::VideoEncoderProxyFactory encoder_factory_{&mock_fake_encoder_};
+ NullEncoderSink sink_;
+};
+
+class MockFrameCadenceAdapter : public FrameCadenceAdapterInterface {
+ public:
+ MOCK_METHOD(void, Initialize, (Callback * callback), (override));
+ MOCK_METHOD(void,
+ SetZeroHertzModeEnabled,
+ (absl::optional<ZeroHertzModeParams>),
+ (override));
+ MOCK_METHOD(void, OnFrame, (const VideoFrame&), (override));
+ MOCK_METHOD(absl::optional<uint32_t>, GetInputFrameRateFps, (), (override));
+ MOCK_METHOD(void, UpdateFrameRate, (), (override));
+ MOCK_METHOD(void,
+ UpdateLayerQualityConvergence,
+ (size_t spatial_index, bool converged),
+ (override));
+ MOCK_METHOD(void,
+ UpdateLayerStatus,
+ (size_t spatial_index, bool enabled),
+ (override));
+ MOCK_METHOD(void, ProcessKeyFrameRequest, (), (override));
+};
+
+class MockEncoderSelector
+ : public VideoEncoderFactory::EncoderSelectorInterface {
+ public:
+ MOCK_METHOD(void,
+ OnCurrentEncoder,
+ (const SdpVideoFormat& format),
+ (override));
+ MOCK_METHOD(absl::optional<SdpVideoFormat>,
+ OnAvailableBitrate,
+ (const DataRate& rate),
+ (override));
+ MOCK_METHOD(absl::optional<SdpVideoFormat>,
+ OnResolutionChange,
+ (const RenderResolution& resolution),
+ (override));
+ MOCK_METHOD(absl::optional<SdpVideoFormat>, OnEncoderBroken, (), (override));
+};
+
+class MockVideoSourceInterface : public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+ MOCK_METHOD(void,
+ AddOrUpdateSink,
+ (rtc::VideoSinkInterface<VideoFrame>*,
+ const rtc::VideoSinkWants&),
+ (override));
+ MOCK_METHOD(void,
+ RemoveSink,
+ (rtc::VideoSinkInterface<VideoFrame>*),
+ (override));
+ MOCK_METHOD(void, RequestRefreshFrame, (), (override));
+};
+
+} // namespace
+
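+// Test fixture wiring a VideoStreamEncoderUnderTest to an adapting frame
+// source, a TestEncoder and a TestSink, all driven by simulated time.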
+class VideoStreamEncoderTest : public ::testing::Test {
+ public:
+ static constexpr TimeDelta kDefaultTimeout = TimeDelta::Seconds(1);
+
+ VideoStreamEncoderTest()
+ : video_send_config_(VideoSendStream::Config(nullptr)),
+ codec_width_(320),
+ codec_height_(240),
+ max_framerate_(kDefaultFramerate),
+ fake_encoder_(&time_controller_),
+ encoder_factory_(&fake_encoder_),
+ stats_proxy_(new MockableSendStatisticsProxy(
+ time_controller_.GetClock(),
+ video_send_config_,
+ webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials_)),
+ sink_(&time_controller_, &fake_encoder_) {}
+
+ void SetUp() override {
+ metrics::Reset();
+ video_send_config_ = VideoSendStream::Config(nullptr);
+ video_send_config_.encoder_settings.encoder_factory = &encoder_factory_;
+ video_send_config_.encoder_settings.bitrate_allocator_factory =
+ &bitrate_allocator_factory_;
+ video_send_config_.rtp.payload_name = "FAKE";
+ video_send_config_.rtp.payload_type = 125;
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ EXPECT_EQ(1u, video_encoder_config.simulcast_layers.size());
+ video_encoder_config.simulcast_layers[0].num_temporal_layers = 1;
+ video_encoder_config.simulcast_layers[0].max_framerate = max_framerate_;
+ video_encoder_config_ = video_encoder_config.Copy();
+
+ ConfigureEncoder(std::move(video_encoder_config));
+ }
+
+ void ConfigureEncoder(
+ VideoEncoderConfig video_encoder_config,
+ VideoStreamEncoder::BitrateAllocationCallbackType
+ allocation_callback_type =
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing,
+ int num_cores = 1) {
+ if (video_stream_encoder_)
+ video_stream_encoder_->Stop();
+
+ auto encoder_queue = GetTaskQueueFactory()->CreateTaskQueue(
+ "EncoderQueue", TaskQueueFactory::Priority::NORMAL);
+ TaskQueueBase* encoder_queue_ptr = encoder_queue.get();
+ std::unique_ptr<FrameCadenceAdapterInterface> cadence_adapter =
+ FrameCadenceAdapterInterface::Create(time_controller_.GetClock(),
+ encoder_queue_ptr, field_trials_);
+ video_stream_encoder_ = std::make_unique<VideoStreamEncoderUnderTest>(
+ &time_controller_, std::move(cadence_adapter), std::move(encoder_queue),
+ stats_proxy_.get(), video_send_config_.encoder_settings,
+ allocation_callback_type, field_trials_, num_cores);
+ video_stream_encoder_->SetSink(&sink_, /*rotation_applied=*/false);
+ video_stream_encoder_->SetSource(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ video_stream_encoder_->SetStartBitrate(kTargetBitrate.bps());
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, nullptr);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ }
+
+ void ResetEncoder(const std::string& payload_name,
+ size_t num_streams,
+ size_t num_temporal_layers,
+ unsigned char num_spatial_layers,
+ bool screenshare,
+ VideoStreamEncoder::BitrateAllocationCallbackType
+ allocation_callback_type =
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing,
+ int num_cores = 1) {
+ video_send_config_.rtp.payload_name = payload_name;
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(PayloadStringToCodecType(payload_name),
+ num_streams, &video_encoder_config);
+ for (auto& layer : video_encoder_config.simulcast_layers) {
+ layer.num_temporal_layers = num_temporal_layers;
+ layer.max_framerate = kDefaultFramerate;
+ }
+ video_encoder_config.max_bitrate_bps =
+ num_streams == 1 ? kTargetBitrate.bps() : kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ screenshare ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo;
+ if (payload_name == "VP9") {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = num_spatial_layers;
+ vp9_settings.automaticResizeOn = num_spatial_layers <= 1;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ }
+ ConfigureEncoder(std::move(video_encoder_config), allocation_callback_type,
+ num_cores);
+ }
+
+ VideoFrame CreateFrame(int64_t ntp_time_ms,
+ rtc::Event* destruction_event) const {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(rtc::make_ref_counted<TestBuffer>(
+ destruction_event, codec_width_, codec_height_))
+ .set_ntp_time_ms(ntp_time_ms)
+ .set_timestamp_ms(99)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ }
+
+ VideoFrame CreateFrameWithUpdatedPixel(int64_t ntp_time_ms,
+ rtc::Event* destruction_event,
+ int offset_x) const {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(rtc::make_ref_counted<TestBuffer>(
+ destruction_event, codec_width_, codec_height_))
+ .set_ntp_time_ms(ntp_time_ms)
+ .set_timestamp_ms(99)
+ .set_rotation(kVideoRotation_0)
+ .set_update_rect(VideoFrame::UpdateRect{offset_x, 0, 1, 1})
+ .build();
+ }
+
+ VideoFrame CreateFrame(int64_t ntp_time_ms, int width, int height) const {
+ auto buffer = rtc::make_ref_counted<TestBuffer>(nullptr, width, height);
+ I420Buffer::SetBlack(buffer.get());
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(std::move(buffer))
+ .set_ntp_time_ms(ntp_time_ms)
+ .set_timestamp_ms(ntp_time_ms)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ }
+
+ VideoFrame CreateNV12Frame(int64_t ntp_time_ms, int width, int height) const {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(NV12Buffer::Create(width, height))
+ .set_ntp_time_ms(ntp_time_ms)
+ .set_timestamp_ms(ntp_time_ms)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ }
+
+ VideoFrame CreateFakeNativeFrame(int64_t ntp_time_ms,
+ rtc::Event* destruction_event,
+ int width,
+ int height) const {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(rtc::make_ref_counted<FakeNativeBuffer>(
+ destruction_event, width, height))
+ .set_ntp_time_ms(ntp_time_ms)
+ .set_timestamp_ms(99)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ }
+
+ VideoFrame CreateFakeNV12NativeFrame(int64_t ntp_time_ms,
+ rtc::Event* destruction_event,
+ int width,
+ int height) const {
+ return VideoFrame::Builder()
+ .set_video_frame_buffer(rtc::make_ref_counted<FakeNV12NativeBuffer>(
+ destruction_event, width, height))
+ .set_ntp_time_ms(ntp_time_ms)
+ .set_timestamp_ms(99)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ }
+
+ VideoFrame CreateFakeNativeFrame(int64_t ntp_time_ms,
+ rtc::Event* destruction_event) const {
+ return CreateFakeNativeFrame(ntp_time_ms, destruction_event, codec_width_,
+ codec_height_);
+ }
+
+ void VerifyAllocatedBitrate(const VideoBitrateAllocation& expected_bitrate) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(expected_bitrate, sink_.GetLastVideoBitrateAllocation());
+ }
+
+ void WaitForEncodedFrame(int64_t expected_ntp_time) {
+ sink_.WaitForEncodedFrame(expected_ntp_time);
+ AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
+ }
+
+ bool TimedWaitForEncodedFrame(int64_t expected_ntp_time, TimeDelta timeout) {
+ bool ok = sink_.TimedWaitForEncodedFrame(expected_ntp_time, timeout);
+ AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
+ return ok;
+ }
+
+ void WaitForEncodedFrame(uint32_t expected_width, uint32_t expected_height) {
+ sink_.WaitForEncodedFrame(expected_width, expected_height);
+ AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
+ }
+
+ void ExpectDroppedFrame() {
+ sink_.ExpectDroppedFrame();
+ AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
+ }
+
+ bool WaitForFrame(TimeDelta timeout) {
+ bool ok = sink_.WaitForFrame(timeout);
+ AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
+ return ok;
+ }
+
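+  // FakeEncoder with hooks to tweak the reported EncoderInfo, inject encoded
+  // images, simulate rate overshoot and init failures, and inspect the last
+  // encoded frame parameters.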
+ class TestEncoder : public test::FakeEncoder {
+ public:
+ explicit TestEncoder(TimeController* time_controller)
+ : FakeEncoder(time_controller->GetClock()),
+ time_controller_(time_controller) {
+ RTC_DCHECK(time_controller_);
+ }
+
+ VideoEncoder::EncoderInfo GetEncoderInfo() const override {
+ MutexLock lock(&local_mutex_);
+ EncoderInfo info = FakeEncoder::GetEncoderInfo();
+ if (initialized_ == EncoderState::kInitialized) {
+ if (quality_scaling_) {
+ info.scaling_settings = VideoEncoder::ScalingSettings(
+ kQpLow, kQpHigh, kMinPixelsPerFrame);
+ }
+ info.is_hardware_accelerated = is_hardware_accelerated_;
+ for (int i = 0; i < kMaxSpatialLayers; ++i) {
+ if (temporal_layers_supported_[i]) {
+ info.fps_allocation[i].clear();
+ int num_layers = temporal_layers_supported_[i].value() ? 2 : 1;
+ for (int tid = 0; tid < num_layers; ++tid)
+ info.fps_allocation[i].push_back(255 / (num_layers - tid));
+ }
+ }
+ }
+
+ info.resolution_bitrate_limits = resolution_bitrate_limits_;
+ info.requested_resolution_alignment = requested_resolution_alignment_;
+ info.apply_alignment_to_all_simulcast_layers =
+ apply_alignment_to_all_simulcast_layers_;
+ info.preferred_pixel_formats = preferred_pixel_formats_;
+ if (is_qp_trusted_.has_value()) {
+ info.is_qp_trusted = is_qp_trusted_;
+ }
+ return info;
+ }
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override {
+ MutexLock lock(&local_mutex_);
+ encoded_image_callback_ = callback;
+ return FakeEncoder::RegisterEncodeCompleteCallback(callback);
+ }
+
+ void ContinueEncode() { continue_encode_event_.Set(); }
+
+ void CheckLastTimeStampsMatch(int64_t ntp_time_ms,
+ uint32_t timestamp) const {
+ MutexLock lock(&local_mutex_);
+ EXPECT_EQ(timestamp_, timestamp);
+ EXPECT_EQ(ntp_time_ms_, ntp_time_ms);
+ }
+
+ void SetQualityScaling(bool b) {
+ MutexLock lock(&local_mutex_);
+ quality_scaling_ = b;
+ }
+
+ void SetRequestedResolutionAlignment(
+ uint32_t requested_resolution_alignment) {
+ MutexLock lock(&local_mutex_);
+ requested_resolution_alignment_ = requested_resolution_alignment;
+ }
+
+ void SetApplyAlignmentToAllSimulcastLayers(bool b) {
+ MutexLock lock(&local_mutex_);
+ apply_alignment_to_all_simulcast_layers_ = b;
+ }
+
+ void SetIsHardwareAccelerated(bool is_hardware_accelerated) {
+ MutexLock lock(&local_mutex_);
+ is_hardware_accelerated_ = is_hardware_accelerated;
+ }
+
+ void SetTemporalLayersSupported(size_t spatial_idx, bool supported) {
+ RTC_DCHECK_LT(spatial_idx, kMaxSpatialLayers);
+ MutexLock lock(&local_mutex_);
+ temporal_layers_supported_[spatial_idx] = supported;
+ }
+
+ void SetResolutionBitrateLimits(
+ std::vector<ResolutionBitrateLimits> thresholds) {
+ MutexLock lock(&local_mutex_);
+ resolution_bitrate_limits_ = thresholds;
+ }
+
+ void ForceInitEncodeFailure(bool force_failure) {
+ MutexLock lock(&local_mutex_);
+ force_init_encode_failed_ = force_failure;
+ }
+
+ void SimulateOvershoot(double rate_factor) {
+ MutexLock lock(&local_mutex_);
+ rate_factor_ = rate_factor;
+ }
+
+ uint32_t GetLastFramerate() const {
+ MutexLock lock(&local_mutex_);
+ return last_framerate_;
+ }
+
+ VideoFrame::UpdateRect GetLastUpdateRect() const {
+ MutexLock lock(&local_mutex_);
+ return last_update_rect_;
+ }
+
+ const std::vector<VideoFrameType>& LastFrameTypes() const {
+ MutexLock lock(&local_mutex_);
+ return last_frame_types_;
+ }
+
+ void InjectFrame(const VideoFrame& input_image, bool keyframe) {
+ const std::vector<VideoFrameType> frame_type = {
+ keyframe ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta};
+ {
+ MutexLock lock(&local_mutex_);
+ last_frame_types_ = frame_type;
+ }
+ FakeEncoder::Encode(input_image, &frame_type);
+ }
+
+ void InjectEncodedImage(const EncodedImage& image,
+ const CodecSpecificInfo* codec_specific_info) {
+ MutexLock lock(&local_mutex_);
+ encoded_image_callback_->OnEncodedImage(image, codec_specific_info);
+ }
+
+ void SetEncodedImageData(
+ rtc::scoped_refptr<EncodedImageBufferInterface> encoded_image_data) {
+ MutexLock lock(&local_mutex_);
+ encoded_image_data_ = encoded_image_data;
+ }
+
+ void ExpectNullFrame() {
+ MutexLock lock(&local_mutex_);
+ expect_null_frame_ = true;
+ }
+
+ absl::optional<VideoEncoder::RateControlParameters>
+ GetAndResetLastRateControlSettings() {
+ auto settings = last_rate_control_settings_;
+ last_rate_control_settings_.reset();
+ return settings;
+ }
+
+ int GetLastInputWidth() const {
+ MutexLock lock(&local_mutex_);
+ return last_input_width_;
+ }
+
+ int GetLastInputHeight() const {
+ MutexLock lock(&local_mutex_);
+ return last_input_height_;
+ }
+
+ absl::optional<VideoFrameBuffer::Type> GetLastInputPixelFormat() {
+ MutexLock lock(&local_mutex_);
+ return last_input_pixel_format_;
+ }
+
+ int GetNumSetRates() const {
+ MutexLock lock(&local_mutex_);
+ return num_set_rates_;
+ }
+
+ void SetPreferredPixelFormats(
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ pixel_formats) {
+ MutexLock lock(&local_mutex_);
+ preferred_pixel_formats_ = std::move(pixel_formats);
+ }
+
+ void SetIsQpTrusted(absl::optional<bool> trusted) {
+ MutexLock lock(&local_mutex_);
+ is_qp_trusted_ = trusted;
+ }
+
+ VideoCodecComplexity LastEncoderComplexity() {
+ MutexLock lock(&local_mutex_);
+ return last_encoder_complexity_;
+ }
+
+ private:
+ int32_t Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override {
+ {
+ MutexLock lock(&local_mutex_);
+ if (expect_null_frame_) {
+ EXPECT_EQ(input_image.timestamp(), 0u);
+ EXPECT_EQ(input_image.width(), 1);
+ last_frame_types_ = *frame_types;
+ expect_null_frame_ = false;
+ } else {
+ EXPECT_GT(input_image.timestamp(), timestamp_);
+ EXPECT_GT(input_image.ntp_time_ms(), ntp_time_ms_);
+ EXPECT_EQ(input_image.timestamp(), input_image.ntp_time_ms() * 90);
+ }
+
+ timestamp_ = input_image.timestamp();
+ ntp_time_ms_ = input_image.ntp_time_ms();
+ last_input_width_ = input_image.width();
+ last_input_height_ = input_image.height();
+ last_update_rect_ = input_image.update_rect();
+ last_frame_types_ = *frame_types;
+ last_input_pixel_format_ = input_image.video_frame_buffer()->type();
+ }
+ int32_t result = FakeEncoder::Encode(input_image, frame_types);
+ return result;
+ }
+
+ CodecSpecificInfo EncodeHook(
+ EncodedImage& encoded_image,
+ rtc::scoped_refptr<EncodedImageBuffer> buffer) override {
+ CodecSpecificInfo codec_specific;
+ {
+ MutexLock lock(&mutex_);
+ codec_specific.codecType = config_.codecType;
+ }
+ MutexLock lock(&local_mutex_);
+ if (encoded_image_data_) {
+ encoded_image.SetEncodedData(encoded_image_data_);
+ }
+ return codec_specific;
+ }
+
+ int32_t InitEncode(const VideoCodec* config,
+ const Settings& settings) override {
+ int res = FakeEncoder::InitEncode(config, settings);
+
+ MutexLock lock(&local_mutex_);
+ EXPECT_EQ(initialized_, EncoderState::kUninitialized);
+
+ if (config->codecType == kVideoCodecVP8) {
+      // Simulate setting up temporal layers in order to validate the life
+      // cycle of these objects.
+ Vp8TemporalLayersFactory factory;
+ frame_buffer_controller_ =
+ factory.Create(*config, settings, &fec_controller_override_);
+ }
+
+ last_encoder_complexity_ = config->GetVideoEncoderComplexity();
+
+ if (force_init_encode_failed_) {
+ initialized_ = EncoderState::kInitializationFailed;
+ return -1;
+ }
+
+ initialized_ = EncoderState::kInitialized;
+ return res;
+ }
+
+ int32_t Release() override {
+ MutexLock lock(&local_mutex_);
+ EXPECT_NE(initialized_, EncoderState::kUninitialized);
+ initialized_ = EncoderState::kUninitialized;
+ return FakeEncoder::Release();
+ }
+
+    void SetRates(const RateControlParameters& parameters) override {
+ MutexLock lock(&local_mutex_);
+ num_set_rates_++;
+ VideoBitrateAllocation adjusted_rate_allocation;
+ for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
+ for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
+ if (parameters.bitrate.HasBitrate(si, ti)) {
+ adjusted_rate_allocation.SetBitrate(
+ si, ti,
+ static_cast<uint32_t>(parameters.bitrate.GetBitrate(si, ti) *
+ rate_factor_));
+ }
+ }
+ }
+ last_framerate_ = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+ last_rate_control_settings_ = parameters;
+      RateControlParameters adjusted_parameters = parameters;
+      adjusted_parameters.bitrate = adjusted_rate_allocation;
+      FakeEncoder::SetRates(adjusted_parameters);
+ }
+
+ TimeController* const time_controller_;
+ mutable Mutex local_mutex_;
+ enum class EncoderState {
+ kUninitialized,
+ kInitializationFailed,
+ kInitialized
+ } initialized_ RTC_GUARDED_BY(local_mutex_) = EncoderState::kUninitialized;
+ rtc::Event continue_encode_event_;
+ uint32_t timestamp_ RTC_GUARDED_BY(local_mutex_) = 0;
+ int64_t ntp_time_ms_ RTC_GUARDED_BY(local_mutex_) = 0;
+ int last_input_width_ RTC_GUARDED_BY(local_mutex_) = 0;
+ int last_input_height_ RTC_GUARDED_BY(local_mutex_) = 0;
+ bool quality_scaling_ RTC_GUARDED_BY(local_mutex_) = true;
+ uint32_t requested_resolution_alignment_ RTC_GUARDED_BY(local_mutex_) = 1;
+ bool apply_alignment_to_all_simulcast_layers_ RTC_GUARDED_BY(local_mutex_) =
+ false;
+ bool is_hardware_accelerated_ RTC_GUARDED_BY(local_mutex_) = false;
+ rtc::scoped_refptr<EncodedImageBufferInterface> encoded_image_data_
+ RTC_GUARDED_BY(local_mutex_);
+ std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_
+ RTC_GUARDED_BY(local_mutex_);
+ absl::optional<bool>
+ temporal_layers_supported_[kMaxSpatialLayers] RTC_GUARDED_BY(
+ local_mutex_);
+ bool force_init_encode_failed_ RTC_GUARDED_BY(local_mutex_) = false;
+ double rate_factor_ RTC_GUARDED_BY(local_mutex_) = 1.0;
+ uint32_t last_framerate_ RTC_GUARDED_BY(local_mutex_) = 0;
+ absl::optional<VideoEncoder::RateControlParameters>
+ last_rate_control_settings_;
+ VideoFrame::UpdateRect last_update_rect_ RTC_GUARDED_BY(local_mutex_) = {
+ 0, 0, 0, 0};
+ std::vector<VideoFrameType> last_frame_types_;
+ bool expect_null_frame_ = false;
+ EncodedImageCallback* encoded_image_callback_ RTC_GUARDED_BY(local_mutex_) =
+ nullptr;
+ NiceMock<MockFecControllerOverride> fec_controller_override_;
+ std::vector<ResolutionBitrateLimits> resolution_bitrate_limits_
+ RTC_GUARDED_BY(local_mutex_);
+ int num_set_rates_ RTC_GUARDED_BY(local_mutex_) = 0;
+ absl::optional<VideoFrameBuffer::Type> last_input_pixel_format_
+ RTC_GUARDED_BY(local_mutex_);
+ absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
+ preferred_pixel_formats_ RTC_GUARDED_BY(local_mutex_);
+ absl::optional<bool> is_qp_trusted_ RTC_GUARDED_BY(local_mutex_);
+ VideoCodecComplexity last_encoder_complexity_ RTC_GUARDED_BY(local_mutex_){
+ VideoCodecComplexity::kComplexityNormal};
+ };
+
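+  // EncoderSink that records encoded images, bitrate/layer allocations and
+  // reconfigurations, and lets tests wait for encoded frames.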
+ class TestSink : public VideoStreamEncoder::EncoderSink {
+ public:
+ TestSink(TimeController* time_controller, TestEncoder* test_encoder)
+ : time_controller_(time_controller), test_encoder_(test_encoder) {
+ RTC_DCHECK(time_controller_);
+ }
+
+ void WaitForEncodedFrame(int64_t expected_ntp_time) {
+ EXPECT_TRUE(TimedWaitForEncodedFrame(expected_ntp_time, kDefaultTimeout));
+ }
+
+ bool TimedWaitForEncodedFrame(int64_t expected_ntp_time,
+ TimeDelta timeout) {
+ uint32_t timestamp = 0;
+ if (!WaitForFrame(timeout))
+ return false;
+ {
+ MutexLock lock(&mutex_);
+ timestamp = last_timestamp_;
+ }
+ test_encoder_->CheckLastTimeStampsMatch(expected_ntp_time, timestamp);
+ return true;
+ }
+
+ void WaitForEncodedFrame(uint32_t expected_width,
+ uint32_t expected_height) {
+ EXPECT_TRUE(WaitForFrame(kDefaultTimeout));
+ CheckLastFrameSizeMatches(expected_width, expected_height);
+ }
+
+ void CheckLastFrameSizeMatches(uint32_t expected_width,
+ uint32_t expected_height) {
+ uint32_t width = 0;
+ uint32_t height = 0;
+ {
+ MutexLock lock(&mutex_);
+ width = last_width_;
+ height = last_height_;
+ }
+ EXPECT_EQ(expected_height, height);
+ EXPECT_EQ(expected_width, width);
+ }
+
+ void CheckLastFrameRotationMatches(VideoRotation expected_rotation) {
+ VideoRotation rotation;
+ {
+ MutexLock lock(&mutex_);
+ rotation = last_rotation_;
+ }
+ EXPECT_EQ(expected_rotation, rotation);
+ }
+
+ void ExpectDroppedFrame() {
+ EXPECT_FALSE(WaitForFrame(TimeDelta::Millis(100)));
+ }
+
+ bool WaitForFrame(TimeDelta timeout) {
+ RTC_DCHECK(time_controller_->GetMainThread()->IsCurrent());
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ bool ret = encoded_frame_event_.Wait(timeout);
+ time_controller_->AdvanceTime(TimeDelta::Zero());
+ return ret;
+ }
+
+ void SetExpectNoFrames() {
+ MutexLock lock(&mutex_);
+ expect_frames_ = false;
+ }
+
+ int number_of_reconfigurations() const {
+ MutexLock lock(&mutex_);
+ return number_of_reconfigurations_;
+ }
+
+ int last_min_transmit_bitrate() const {
+ MutexLock lock(&mutex_);
+ return min_transmit_bitrate_bps_;
+ }
+
+ void SetNumExpectedLayers(size_t num_layers) {
+ MutexLock lock(&mutex_);
+ num_expected_layers_ = num_layers;
+ }
+
+ int64_t GetLastCaptureTimeMs() const {
+ MutexLock lock(&mutex_);
+ return last_capture_time_ms_;
+ }
+
+ const EncodedImage& GetLastEncodedImage() {
+ MutexLock lock(&mutex_);
+ return last_encoded_image_;
+ }
+
+ std::vector<uint8_t> GetLastEncodedImageData() {
+ MutexLock lock(&mutex_);
+ return std::move(last_encoded_image_data_);
+ }
+
+ VideoBitrateAllocation GetLastVideoBitrateAllocation() {
+ MutexLock lock(&mutex_);
+ return last_bitrate_allocation_;
+ }
+
+ int number_of_bitrate_allocations() const {
+ MutexLock lock(&mutex_);
+ return number_of_bitrate_allocations_;
+ }
+
+ VideoLayersAllocation GetLastVideoLayersAllocation() {
+ MutexLock lock(&mutex_);
+ return last_layers_allocation_;
+ }
+
+ int number_of_layers_allocations() const {
+ MutexLock lock(&mutex_);
+ return number_of_layers_allocations_;
+ }
+
+ private:
+ Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ MutexLock lock(&mutex_);
+ EXPECT_TRUE(expect_frames_);
+ last_encoded_image_ = EncodedImage(encoded_image);
+ last_encoded_image_data_ = std::vector<uint8_t>(
+ encoded_image.data(), encoded_image.data() + encoded_image.size());
+ uint32_t timestamp = encoded_image.Timestamp();
+ if (last_timestamp_ != timestamp) {
+ num_received_layers_ = 1;
+ last_width_ = encoded_image._encodedWidth;
+ last_height_ = encoded_image._encodedHeight;
+ } else {
+ ++num_received_layers_;
+ last_width_ = std::max(encoded_image._encodedWidth, last_width_);
+ last_height_ = std::max(encoded_image._encodedHeight, last_height_);
+ }
+ last_timestamp_ = timestamp;
+ last_capture_time_ms_ = encoded_image.capture_time_ms_;
+ last_rotation_ = encoded_image.rotation_;
+ if (num_received_layers_ == num_expected_layers_) {
+ encoded_frame_event_.Set();
+ }
+ return Result(Result::OK, last_timestamp_);
+ }
+
+ void OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ bool is_svc,
+ VideoEncoderConfig::ContentType content_type,
+ int min_transmit_bitrate_bps) override {
+ MutexLock lock(&mutex_);
+ ++number_of_reconfigurations_;
+ min_transmit_bitrate_bps_ = min_transmit_bitrate_bps;
+ }
+
+ void OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& allocation) override {
+ MutexLock lock(&mutex_);
+ ++number_of_bitrate_allocations_;
+ last_bitrate_allocation_ = allocation;
+ }
+
+ void OnVideoLayersAllocationUpdated(
+ VideoLayersAllocation allocation) override {
+ MutexLock lock(&mutex_);
+ ++number_of_layers_allocations_;
+ last_layers_allocation_ = allocation;
+ rtc::StringBuilder log;
+ for (const auto& layer : allocation.active_spatial_layers) {
+ log << layer.width << "x" << layer.height << "@" << layer.frame_rate_fps
+ << "[";
+ for (const auto target_bitrate :
+ layer.target_bitrate_per_temporal_layer) {
+ log << target_bitrate.kbps() << ",";
+ }
+ log << "]";
+ }
+ RTC_DLOG(LS_INFO) << "OnVideoLayersAllocationUpdated " << log.str();
+ }
+
+ TimeController* const time_controller_;
+ mutable Mutex mutex_;
+ TestEncoder* test_encoder_;
+ rtc::Event encoded_frame_event_;
+ EncodedImage last_encoded_image_;
+ std::vector<uint8_t> last_encoded_image_data_;
+ uint32_t last_timestamp_ = 0;
+ int64_t last_capture_time_ms_ = 0;
+ uint32_t last_height_ = 0;
+ uint32_t last_width_ = 0;
+ VideoRotation last_rotation_ = kVideoRotation_0;
+ size_t num_expected_layers_ = 1;
+ size_t num_received_layers_ = 0;
+ bool expect_frames_ = true;
+ int number_of_reconfigurations_ = 0;
+ int min_transmit_bitrate_bps_ = 0;
+ VideoBitrateAllocation last_bitrate_allocation_ RTC_GUARDED_BY(&mutex_);
+ int number_of_bitrate_allocations_ RTC_GUARDED_BY(&mutex_) = 0;
+ VideoLayersAllocation last_layers_allocation_ RTC_GUARDED_BY(&mutex_);
+ int number_of_layers_allocations_ RTC_GUARDED_BY(&mutex_) = 0;
+ };
+
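+  // Delegates to the builtin bitrate allocator factory while capturing the
+  // VideoCodec configuration for inspection.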
+ class VideoBitrateAllocatorProxyFactory
+ : public VideoBitrateAllocatorFactory {
+ public:
+ VideoBitrateAllocatorProxyFactory()
+ : bitrate_allocator_factory_(
+ CreateBuiltinVideoBitrateAllocatorFactory()) {}
+
+ std::unique_ptr<VideoBitrateAllocator> CreateVideoBitrateAllocator(
+ const VideoCodec& codec) override {
+ MutexLock lock(&mutex_);
+ codec_config_ = codec;
+ return bitrate_allocator_factory_->CreateVideoBitrateAllocator(codec);
+ }
+
+ VideoCodec codec_config() const {
+ MutexLock lock(&mutex_);
+ return codec_config_;
+ }
+
+ private:
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory_;
+
+ mutable Mutex mutex_;
+ VideoCodec codec_config_ RTC_GUARDED_BY(mutex_);
+ };
+
+ Clock* clock() { return time_controller_.GetClock(); }
+ void AdvanceTime(TimeDelta duration) {
+ time_controller_.AdvanceTime(duration);
+ }
+
+ int64_t CurrentTimeMs() { return clock()->CurrentTime().ms(); }
+
+ protected:
+ virtual TaskQueueFactory* GetTaskQueueFactory() {
+ return time_controller_.GetTaskQueueFactory();
+ }
+
+ test::ScopedKeyValueConfig field_trials_;
+ GlobalSimulatedTimeController time_controller_{Timestamp::Micros(1234)};
+ VideoSendStream::Config video_send_config_;
+ VideoEncoderConfig video_encoder_config_;
+ int codec_width_;
+ int codec_height_;
+ int max_framerate_;
+ TestEncoder fake_encoder_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ VideoBitrateAllocatorProxyFactory bitrate_allocator_factory_;
+ std::unique_ptr<MockableSendStatisticsProxy> stats_proxy_;
+ TestSink sink_;
+ AdaptingFrameForwarder video_source_{&time_controller_};
+ std::unique_ptr<VideoStreamEncoderUnderTest> video_stream_encoder_;
+};
+
+TEST_F(VideoStreamEncoderTest, EncodeOneFrame) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, &frame_destroyed_event));
+ WaitForEncodedFrame(1);
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeout));
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesBeforeFirstOnBitrateUpdated) {
+ // Dropped since no target bitrate has been set.
+ rtc::Event frame_destroyed_event;
+ // The encoder will cache up to one frame for a short duration. Adding two
+ // frames means that the first frame will be dropped and the second frame will
+ // be sent when the encoder is enabled.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, &frame_destroyed_event));
+ AdvanceTime(TimeDelta::Millis(10));
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ AdvanceTime(TimeDelta::Zero());
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeout));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // The pending frame should be received.
+ WaitForEncodedFrame(2);
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+
+ WaitForEncodedFrame(3);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesWhenRateSetToZero) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::Zero(), DataRate::Zero(), DataRate::Zero(), 0, 0, 0);
+
+ // The encoder will cache up to one frame for a short duration. Adding two
+ // frames means that the first frame will be dropped and the second frame will
+ // be sent when the encoder is resumed.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ WaitForEncodedFrame(3);
+ video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr));
+ WaitForEncodedFrame(4);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesWithSameOrOldNtpTimestamp) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+  // This frame will be dropped since it has the same NTP timestamp.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFrameAfterStop) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->Stop();
+ sink_.SetExpectNoFrames();
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFrame(2, &frame_destroyed_event));
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeout));
+}
+
+TEST_F(VideoStreamEncoderTest, DropsPendingFramesOnSlowEncode) {
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ int dropped_count = 0;
+ stats_proxy_->SetDroppedFrameCallback(
+ [&dropped_count](VideoStreamEncoderObserver::DropReason) {
+ ++dropped_count;
+ });
+
+ source.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ source.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ video_stream_encoder_->Stop();
+ EXPECT_EQ(1, dropped_count);
+}
+
+TEST_F(VideoStreamEncoderTest, NativeFrameWithoutI420SupportGetsDelivered) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(
+ CreateFakeNativeFrame(1, &frame_destroyed_event));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNative,
+ fake_encoder_.GetLastInputPixelFormat());
+ EXPECT_EQ(fake_encoder_.config().width, fake_encoder_.GetLastInputWidth());
+ EXPECT_EQ(fake_encoder_.config().height, fake_encoder_.GetLastInputHeight());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NativeFrameWithoutI420SupportGetsCroppedIfNecessary) {
+ // Use the cropping factory.
+ video_encoder_config_.video_stream_factory =
+ rtc::make_ref_counted<CroppingVideoStreamFactory>();
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config_),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Capture a frame at codec_width_/codec_height_.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // The encoder will have been configured once.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(codec_width_, fake_encoder_.config().width);
+ EXPECT_EQ(codec_height_, fake_encoder_.config().height);
+
+ // Now send in a fake frame that needs to be cropped as the width/height
+ // aren't divisible by 4 (see CreateEncoderStreams above).
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFakeNativeFrame(
+ 2, &frame_destroyed_event, codec_width_ + 1, codec_height_ + 1));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNative,
+ fake_encoder_.GetLastInputPixelFormat());
+ EXPECT_EQ(fake_encoder_.config().width, fake_encoder_.GetLastInputWidth());
+ EXPECT_EQ(fake_encoder_.config().height, fake_encoder_.GetLastInputHeight());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NonI420FramesShouldNotBeConvertedToI420) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(
+ CreateNV12Frame(1, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNV12,
+ fake_encoder_.GetLastInputPixelFormat());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NativeFrameGetsDelivered_NoFrameTypePreference) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ fake_encoder_.SetPreferredPixelFormats({});
+
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame(
+ 1, &frame_destroyed_event, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNative,
+ fake_encoder_.GetLastInputPixelFormat());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NativeFrameGetsDelivered_PixelFormatPreferenceMatches) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ fake_encoder_.SetPreferredPixelFormats({VideoFrameBuffer::Type::kNV12});
+
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame(
+ 1, &frame_destroyed_event, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNative,
+ fake_encoder_.GetLastInputPixelFormat());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NativeFrameGetsDelivered_MappingIsNotFeasible) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Fake NV12 native frame does not allow mapping to I444.
+ fake_encoder_.SetPreferredPixelFormats({VideoFrameBuffer::Type::kI444});
+
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame(
+ 1, &frame_destroyed_event, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNative,
+ fake_encoder_.GetLastInputPixelFormat());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NativeFrameGetsDelivered_BackedByNV12) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame(
+ 1, &frame_destroyed_event, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(VideoFrameBuffer::Type::kNative,
+ fake_encoder_.GetLastInputPixelFormat());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesWhenCongestionWindowPushbackSet) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0.5);
+ // The congestion window pushback is set to 0.5, which will drop 1/2 of
+ // frames. Adding two frames means that the first frame will be dropped and
+ // the second frame will be sent to the encoder.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+ WaitForEncodedFrame(3);
+ video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr));
+ video_source_.IncomingCapturedFrame(CreateFrame(5, nullptr));
+ WaitForEncodedFrame(5);
+ EXPECT_EQ(2u, stats_proxy_->GetStats().frames_dropped_by_congestion_window);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ConfigureEncoderTriggersOnEncoderConfigurationChanged) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ EXPECT_EQ(0, sink_.number_of_reconfigurations());
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // The encoder will have been configured once when the first frame is
+ // received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.min_transmit_bitrate_bps = 9999;
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+ EXPECT_EQ(9999, sink_.last_min_transmit_bitrate());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, FrameResolutionChangeReconfigureEncoder) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // The encoder will have been configured once.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(codec_width_, fake_encoder_.config().width);
+ EXPECT_EQ(codec_height_, fake_encoder_.config().height);
+
+ codec_width_ *= 2;
+ codec_height_ *= 2;
+ // Capture a frame with a higher resolution and wait for it to synchronize
+ // with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(codec_width_, fake_encoder_.config().width);
+ EXPECT_EQ(codec_height_, fake_encoder_.config().height);
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderInstanceDestroyedBeforeAnotherInstanceCreated) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+  // Changing the max payload data length recreates the encoder.
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength / 2);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(1, encoder_factory_.GetMaxNumberOfSimultaneousEncoderInstances());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, BitrateLimitsChangeReconfigureRateAllocator) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = kTargetBitrate.bps();
+ video_stream_encoder_->SetStartBitrate(kStartBitrate.bps());
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
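+  // Unit note: VideoCodec's maxBitrate and startBitrate are in kbps, while
+  // VideoEncoderConfig uses bps, hence the "* 1000" in the checks below.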
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // The encoder will have been configured once when the first frame is
+ // received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kTargetBitrate.bps(),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+ EXPECT_EQ(kStartBitrate.bps(),
+ bitrate_allocator_factory_.codec_config().startBitrate * 1000);
+
+  test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = kTargetBitrate.bps() * 2;
+ video_stream_encoder_->SetStartBitrate(kStartBitrate.bps() * 2);
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+  // Bitrate limits have changed: the rate allocator should be reconfigured,
+  // but the encoder should not be.
+ EXPECT_EQ(kTargetBitrate.bps() * 2,
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+ EXPECT_EQ(kStartBitrate.bps() * 2,
+ bitrate_allocator_factory_.codec_config().startBitrate * 1000);
+ EXPECT_EQ(1, fake_encoder_.GetNumInitializations());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ IntersectionOfEncoderAndAppBitrateLimitsUsedWhenBothProvided) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const uint32_t kMinEncBitrateKbps = 100;
+ const uint32_t kMaxEncBitrateKbps = 1000;
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
+ /*frame_size_pixels=*/codec_width_ * codec_height_,
+ /*min_start_bitrate_bps=*/0,
+ /*min_bitrate_bps=*/kMinEncBitrateKbps * 1000,
+ /*max_bitrate_bps=*/kMaxEncBitrateKbps * 1000);
+ fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = (kMaxEncBitrateKbps + 1) * 1000;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps =
+ (kMinEncBitrateKbps + 1) * 1000;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ // When both encoder and app provide bitrate limits, the intersection of
+ // provided sets should be used.
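+  // Expected merge (a sketch of the observable result, not the production
+  // code path):
+  //   min = max(enc_min, app_min) = max(100, 101) = 101 kbps
+  //   max = min(enc_max, app_max) = min(1000, 1001) = 1000 kbps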
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(kMaxEncBitrateKbps,
+ bitrate_allocator_factory_.codec_config().maxBitrate);
+ EXPECT_EQ(kMinEncBitrateKbps + 1,
+ bitrate_allocator_factory_.codec_config().minBitrate);
+
+ video_encoder_config.max_bitrate_bps = (kMaxEncBitrateKbps - 1) * 1000;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps =
+ (kMinEncBitrateKbps - 1) * 1000;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(kMaxEncBitrateKbps - 1,
+ bitrate_allocator_factory_.codec_config().maxBitrate);
+ EXPECT_EQ(kMinEncBitrateKbps,
+ bitrate_allocator_factory_.codec_config().minBitrate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderAndAppLimitsDontIntersectEncoderLimitsIgnored) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const uint32_t kMinAppBitrateKbps = 100;
+ const uint32_t kMaxAppBitrateKbps = 200;
+ const uint32_t kMinEncBitrateKbps = kMaxAppBitrateKbps + 1;
+ const uint32_t kMaxEncBitrateKbps = kMaxAppBitrateKbps * 2;
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
+ /*frame_size_pixels=*/codec_width_ * codec_height_,
+ /*min_start_bitrate_bps=*/0,
+ /*min_bitrate_bps=*/kMinEncBitrateKbps * 1000,
+ /*max_bitrate_bps=*/kMaxEncBitrateKbps * 1000);
+ fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = kMaxAppBitrateKbps * 1000;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps =
+ kMinAppBitrateKbps * 1000;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(kMaxAppBitrateKbps,
+ bitrate_allocator_factory_.codec_config().maxBitrate);
+ EXPECT_EQ(kMinAppBitrateKbps,
+ bitrate_allocator_factory_.codec_config().minBitrate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderRecommendedMaxAndMinBitratesUsedForGivenResolution) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits_270p(
+ 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits_360p(
+ 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {encoder_bitrate_limits_270p, encoder_bitrate_limits_360p});
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 0;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
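+  // Selection rule implied by the checks below (a summary, not a spec): the
+  // limits for the smallest configured resolution that is >= the frame size
+  // apply; frames above the largest configured resolution get no recommended
+  // limits, and frames below the smallest reuse its limits.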
+ // 270p. The bitrate limits recommended by encoder for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_270p.min_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_270p.max_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+
+ // 360p. The bitrate limits recommended by encoder for 360p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_360p.min_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_360p.max_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+
+ // Resolution between 270p and 360p. The bitrate limits recommended by
+ // encoder for 360p should be used.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(3, (640 + 480) / 2, (360 + 270) / 2));
+ WaitForEncodedFrame(3);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_360p.min_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_360p.max_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+
+ // Resolution higher than 360p. The caps recommended by encoder should be
+ // ignored.
+ video_source_.IncomingCapturedFrame(CreateFrame(4, 960, 540));
+ WaitForEncodedFrame(4);
+ EXPECT_NE(static_cast<uint32_t>(encoder_bitrate_limits_270p.min_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().minBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(encoder_bitrate_limits_270p.max_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(encoder_bitrate_limits_360p.min_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().minBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(encoder_bitrate_limits_360p.max_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+
+ // Resolution lower than 270p. The max bitrate limit recommended by encoder
+ // for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(5, 320, 180));
+ WaitForEncodedFrame(5);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_270p.min_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(encoder_bitrate_limits_270p.max_bitrate_bps),
+ bitrate_allocator_factory_.codec_config().maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderRecommendedMaxBitrateCapsTargetBitrate) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 0;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ // Encode 720p frame to get the default encoder target bitrate.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ const uint32_t kDefaultTargetBitrateFor720pKbps =
+ bitrate_allocator_factory_.codec_config()
+ .simulcastStream[0]
+ .targetBitrate;
+
+ // Set the max recommended encoder bitrate to something lower than the default
+ // target bitrate.
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
+ 1280 * 720, 10 * 1000, 10 * 1000,
+ kDefaultTargetBitrateFor720pKbps / 2 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
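+  // Expected effect (sketch): after reinitialization the stream target
+  // becomes min(kDefaultTargetBitrateFor720pKbps, recommended max), i.e. the
+  // encoder-recommended max caps the default target bitrate.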
+
+ // Change resolution to trigger encoder reinitialization.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ WaitForEncodedFrame(2);
+ video_source_.IncomingCapturedFrame(CreateFrame(3, 1280, 720));
+ WaitForEncodedFrame(3);
+
+ // Ensure the target bitrate is capped by the max bitrate.
+ EXPECT_EQ(bitrate_allocator_factory_.codec_config().maxBitrate * 1000,
+ static_cast<uint32_t>(encoder_bitrate_limits.max_bitrate_bps));
+ EXPECT_EQ(bitrate_allocator_factory_.codec_config()
+ .simulcastStream[0]
+ .targetBitrate *
+ 1000,
+ static_cast<uint32_t>(encoder_bitrate_limits.max_bitrate_bps));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderMaxAndMinBitratesUsedForTwoStreamsHighestActive) {
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p(
+ 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p(
+ 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderLimits270p, kEncoderLimits360p});
+
+ // Two streams, highest stream active.
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ const int kNumStreams = 2;
+ test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config);
+ config.max_bitrate_bps = 0;
+ config.simulcast_layers[0].active = false;
+ config.simulcast_layers[1].active = true;
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+
+ // The encoder bitrate limits for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, kNumStreams);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // The encoder bitrate limits for 360p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+  // Resolution between 270p and 360p. The encoder limits for 360p should be
+  // used.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(3, (640 + 480) / 2, (360 + 270) / 2));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // Resolution higher than 360p. Encoder limits should be ignored.
+ video_source_.IncomingCapturedFrame(CreateFrame(4, 960, 540));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits360p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits360p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // Resolution lower than 270p. The encoder limits for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(5, 320, 180));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ DefaultEncoderMaxAndMinBitratesUsedForTwoStreamsHighestActive) {
+ // Two streams, highest stream active.
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ const int kNumStreams = 2;
+ test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config);
+ config.max_bitrate_bps = 0;
+ config.simulcast_layers[0].active = false;
+ config.simulcast_layers[1].active = true;
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+
+ // Default bitrate limits for 270p should be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kDefaultLimits270p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP8, 480 * 270);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, kNumStreams);
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits270p->min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits270p->max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // Default bitrate limits for 360p should be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kDefaultLimits360p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP8, 640 * 360);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits360p->min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits360p->max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+  // Resolution between 270p and 360p. The default limits for 360p should be
+  // used.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(3, (640 + 480) / 2, (360 + 270) / 2));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits360p->min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits360p->max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // Default bitrate limits for 540p should be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits>
+ kDefaultLimits540p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP8, 960 * 540);
+ video_source_.IncomingCapturedFrame(CreateFrame(4, 960, 540));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits540p->min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kDefaultLimits540p->max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderMaxAndMinBitratesUsedForThreeStreamsMiddleActive) {
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p(
+ 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p(
+ 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p(
+ 1280 * 720, 54 * 1000, 31 * 1000, 3456 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderLimits270p, kEncoderLimits360p, kEncoderLimits720p});
+
+ // Three streams, middle stream active.
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ const int kNumStreams = 3;
+ test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config);
+ config.simulcast_layers[0].active = false;
+ config.simulcast_layers[1].active = true;
+ config.simulcast_layers[2].active = false;
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+
+ // The encoder bitrate limits for 360p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, kNumStreams);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // The encoder bitrate limits for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 960, 540));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderMaxAndMinBitratesNotUsedForThreeStreamsLowestActive) {
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p(
+ 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p(
+ 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p(
+ 1280 * 720, 54 * 1000, 31 * 1000, 3456 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderLimits270p, kEncoderLimits360p, kEncoderLimits720p});
+
+ // Three streams, lowest stream active.
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ const int kNumStreams = 3;
+ test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config);
+ config.simulcast_layers[0].active = true;
+ config.simulcast_layers[1].active = false;
+ config.simulcast_layers[2].active = false;
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+
+  // The resolution of the lowest stream is lower than 270p. Encoder limits
+  // are not applied to the lowest stream, so the limits for 270p should not
+  // be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, kNumStreams);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderMaxBitrateCappedByConfigForTwoStreamsHighestActive) {
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p(
+ 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p(
+ 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderLimits270p, kEncoderLimits360p});
+ const int kMaxBitrateBps = kEncoderLimits360p.max_bitrate_bps - 100 * 1000;
+
+ // Two streams, highest stream active.
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ const int kNumStreams = 2;
+ test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config);
+ config.simulcast_layers[0].active = false;
+ config.simulcast_layers[1].active = true;
+ config.simulcast_layers[1].max_bitrate_bps = kMaxBitrateBps;
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+
+ // The encoder bitrate limits for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, kNumStreams);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ // The max configured bitrate is less than the encoder limit for 360p.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.min_bitrate_bps),
+ fake_encoder_.config().simulcastStream[1].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kMaxBitrateBps),
+ fake_encoder_.config().simulcastStream[1].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchSourceDeregisterEncoderAsSink) {
+ EXPECT_TRUE(video_source_.has_sinks());
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_FALSE(video_source_.has_sinks());
+ EXPECT_TRUE(new_video_source.has_sinks());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SinkWantsRotationApplied) {
+ EXPECT_FALSE(video_source_.sink_wants().rotation_applied);
+ video_stream_encoder_->SetSink(&sink_, true /*rotation_applied*/);
+ EXPECT_TRUE(video_source_.sink_wants().rotation_applied);
+ video_stream_encoder_->Stop();
+}
+
+class ResolutionAlignmentTest
+ : public VideoStreamEncoderTest,
+ public ::testing::WithParamInterface<
+ ::testing::tuple<int, std::vector<double>>> {
+ public:
+ ResolutionAlignmentTest()
+ : requested_alignment_(::testing::get<0>(GetParam())),
+ scale_factors_(::testing::get<1>(GetParam())) {}
+
+ protected:
+ const uint32_t requested_alignment_;
+ const std::vector<double> scale_factors_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ AlignmentAndScaleFactors,
+ ResolutionAlignmentTest,
+ ::testing::Combine(
+ ::testing::Values(1, 2, 3, 4, 5, 6, 16, 22), // requested_alignment_
+ ::testing::Values(std::vector<double>{-1.0}, // scale_factors_
+ std::vector<double>{-1.0, -1.0},
+ std::vector<double>{-1.0, -1.0, -1.0},
+ std::vector<double>{4.0, 2.0, 1.0},
+ std::vector<double>{9999.0, -1.0, 1.0},
+ std::vector<double>{3.99, 2.01, 1.0},
+ std::vector<double>{4.9, 1.7, 1.25},
+ std::vector<double>{10.0, 4.0, 3.0},
+ std::vector<double>{1.75, 3.5},
+ std::vector<double>{1.5, 2.5},
+ std::vector<double>{1.3, 1.0})));
+
+TEST_P(ResolutionAlignmentTest, SinkWantsAlignmentApplied) {
+ // Set requested resolution alignment.
+ video_source_.set_adaptation_enabled(true);
+ fake_encoder_.SetRequestedResolutionAlignment(requested_alignment_);
+ fake_encoder_.SetApplyAlignmentToAllSimulcastLayers(true);
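+  // Contract exercised below (a sketch, not the adjustment algorithm
+  // itself): with alignment applied to all simulcast layers, each layer's
+  // encoded width and height must be divisible by requested_alignment_,
+  // which may require adjusting the configured scale factors.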
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = scale_factors_.size();
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = scale_factors_[i];
+ }
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(std::move(config), kMaxPayloadLength);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+ // Wait for all layers before triggering event.
+ sink_.SetNumExpectedLayers(num_streams);
+
+ // On the 1st frame, we should have initialized the encoder and
+ // asked for its resolution requirements.
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(1, fake_encoder_.GetNumInitializations());
+
+ // On the 2nd frame, we should be receiving a correctly aligned resolution.
+  // (It's up to the encoder to potentially drop the previous frame,
+ // to avoid coding back-to-back keyframes.)
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_GE(fake_encoder_.GetNumInitializations(), 1);
+
+ VideoCodec codec = fake_encoder_.config();
+ EXPECT_EQ(codec.numberOfSimulcastStreams, num_streams);
+ // Frame size should be a multiple of the requested alignment.
+ for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(codec.simulcastStream[i].width % requested_alignment_, 0u);
+ EXPECT_EQ(codec.simulcastStream[i].height % requested_alignment_, 0u);
+ // Aspect ratio should match.
+ EXPECT_EQ(codec.width * codec.simulcastStream[i].height,
+ codec.height * codec.simulcastStream[i].width);
+ }
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
+ const int kFramerateFps = 30;
+ const int kWidth = 1280;
+ const int kHeight = 720;
+
+ // We rely on the automatic resolution adaptation, but we handle framerate
+ // adaptation manually by mocking the stats proxy.
+ video_source_.set_adaptation_enabled(true);
+
+ // Enable BALANCED preference, no initial limitation.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->SetSource(&video_source_,
+ webrtc::DegradationPreference::BALANCED);
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
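+  // Sketch of the loop below: each TriggerCpuOveruse() should lower either
+  // max_pixel_count or max_framerate_fps (BALANCED trades both), and the
+  // loop exits once neither decreases, i.e. once the kMinPixelsPerFrame and
+  // kMinBalancedFramerateFps floors are reached.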
+ // Adapt down as far as possible.
+ rtc::VideoSinkWants last_wants;
+ int64_t t = 1;
+ int loop_count = 0;
+ do {
+ ++loop_count;
+ last_wants = video_source_.sink_wants();
+
+ // Simulate the framerate we've been asked to adapt to.
+ const int fps = std::min(kFramerateFps, last_wants.max_framerate_fps);
+ const int frame_interval_ms = rtc::kNumMillisecsPerSec / fps;
+ VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+ mock_stats.input_frame_rate = fps;
+ stats_proxy_->SetMockStats(mock_stats);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(t, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(t);
+ t += frame_interval_ms;
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ EXPECT_THAT(
+ video_source_.sink_wants(),
+ FpsInRangeForPixelsInBalanced(*video_source_.last_sent_width() *
+ *video_source_.last_sent_height()));
+ } while (video_source_.sink_wants().max_pixel_count <
+ last_wants.max_pixel_count ||
+ video_source_.sink_wants().max_framerate_fps <
+ last_wants.max_framerate_fps);
+
+ // Verify that we've adapted all the way down.
+ stats_proxy_->ResetMockStats();
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(loop_count - 1,
+ stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(kMinPixelsPerFrame, *video_source_.last_sent_width() *
+ *video_source_.last_sent_height());
+ EXPECT_EQ(kMinBalancedFramerateFps,
+ video_source_.sink_wants().max_framerate_fps);
+
+ // Adapt back up the same number of times we adapted down.
+ for (int i = 0; i < loop_count - 1; ++i) {
+ last_wants = video_source_.sink_wants();
+
+ // Simulate the framerate we've been asked to adapt to.
+ const int fps = std::min(kFramerateFps, last_wants.max_framerate_fps);
+ const int frame_interval_ms = rtc::kNumMillisecsPerSec / fps;
+ VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+ mock_stats.input_frame_rate = fps;
+ stats_proxy_->SetMockStats(mock_stats);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(t, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(t);
+ t += frame_interval_ms;
+
+ video_stream_encoder_->TriggerCpuUnderuse();
+ EXPECT_THAT(
+ video_source_.sink_wants(),
+ FpsInRangeForPixelsInBalanced(*video_source_.last_sent_width() *
+ *video_source_.last_sent_height()));
+ EXPECT_TRUE(video_source_.sink_wants().max_pixel_count >
+ last_wants.max_pixel_count ||
+ video_source_.sink_wants().max_framerate_fps >
+ last_wants.max_framerate_fps);
+ }
+
+ EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax());
+ stats_proxy_->ResetMockStats();
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ((loop_count - 1) * 2,
+ stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ SinkWantsNotChangedByResourceLimitedBeforeDegradationPreferenceChange) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrate, kTargetBitrate,
+ kTargetBitrate, 0, 0, 0);
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
+
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ int64_t ntp_time = kFrameIntervalMs;
+
+ // Force an input frame rate to be available, or the adaptation call won't
+  // know what framerate to adapt from.
+ const int kInputFps = 30;
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ video_source_.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+
+ EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ video_source_.sink_wants().max_pixel_count);
+ // Some framerate constraint should be set.
+ int restricted_fps = video_source_.sink_wants().max_framerate_fps;
+ EXPECT_LT(restricted_fps, kInputFps);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += 100;
+
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+
+ video_stream_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+
+ // Some resolution constraint should be set.
+ EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count,
+ kFrameWidth * kFrameHeight);
+ EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, kInputFps);
+
+ int pixel_count = video_source_.sink_wants().max_pixel_count;
+  // Triggering CPU underuse should not change the sink wants: there has been
+  // no resolution overuse since the degradation preference was changed.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+ EXPECT_EQ(video_source_.sink_wants().max_pixel_count, pixel_count);
+ EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, kInputFps);
+
+ // Change the degradation preference back. CPU underuse should not adapt since
+ // QP is most limited.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += 100;
+  // The resolution adaptation is gone after changing degradation preference.
+ EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ video_source_.sink_wants().max_pixel_count);
+ // The fps adaptation from above is now back.
+ EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, restricted_fps);
+
+ // Trigger CPU underuse.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+ EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, restricted_fps);
+
+ // Trigger QP underuse, fps should return to normal.
+ video_stream_encoder_->TriggerQualityHigh();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+ EXPECT_THAT(video_source_.sink_wants(), FpsMax());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
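+  // Mapping exercised below, as observed through sink_wants (a summary, not
+  // an exhaustive spec): MAINTAIN_FRAMERATE restricts max_pixel_count,
+  // MAINTAIN_RESOLUTION restricts max_framerate_fps, DISABLED restricts
+  // neither, and restrictions are stored per preference and reapplied when
+  // switching back.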
+
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ int64_t frame_timestamp = 1;
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+  // The default degradation preference is maintain-framerate, so it will
+  // lower the max wanted resolution.
+ EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count,
+ kFrameWidth * kFrameHeight);
+ EXPECT_EQ(kDefaultFramerate, video_source_.sink_wants().max_framerate_fps);
+
+ // Set new source, switch to maintain-resolution.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+ // Initially no degradation registered.
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
+
+ // Force an input frame rate to be available, or the adaptation call won't
+  // know what framerate to adapt from.
+ const int kInputFps = 30;
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+ // Some framerate constraint should be set.
+ EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+ EXPECT_LT(new_video_source.sink_wants().max_framerate_fps, kInputFps);
+
+ // Turn off degradation completely.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &new_video_source, webrtc::DegradationPreference::DISABLED);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+ // Still no degradation.
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
+
+  // Calling SetSource with resolution scaling enabled applies the old
+  // SinkWants.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+ EXPECT_LT(new_video_source.sink_wants().max_pixel_count,
+ kFrameWidth * kFrameHeight);
+ EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+ EXPECT_EQ(kDefaultFramerate, new_video_source.sink_wants().max_framerate_fps);
+
+  // Calling SetSource with framerate scaling enabled applies the old
+  // SinkWants.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+ EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+ EXPECT_LT(new_video_source.sink_wants().max_framerate_fps, kInputFps);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, StatsTracksQualityAdaptationStats) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ // Trigger adapt down.
+ video_stream_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.bw_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+
+ // Trigger adapt up.
+ video_stream_encoder_->TriggerQualityHigh();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, StatsTracksCpuAdaptationStats) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal use.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsCpuAdaptation) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set adaptation disabled.
+ video_stream_encoder_->SetSource(&new_video_source,
+ webrtc::DegradationPreference::DISABLED);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set adaptation back to enabled.
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ WaitForEncodedFrame(5);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal use.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ new_video_source.IncomingCapturedFrame(CreateFrame(6, kWidth, kHeight));
+ WaitForEncodedFrame(6);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsQualityAdaptation) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(&new_video_source,
+ webrtc::DegradationPreference::BALANCED);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ // Trigger adapt down.
+ video_stream_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ video_stream_encoder_->SetSource(&new_video_source,
+ webrtc::DegradationPreference::BALANCED);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+
+ // Disable resolution scaling.
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ WaitForEncodedFrame(5);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityAdaptationStatsAreResetWhenScalerIsDisabled) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_source_.set_adaptation_enabled(true);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down.
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Leave source unchanged, but disable quality scaler.
+ fake_encoder_.SetQualityScaling(false);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ // Make format different, to force recreation of encoder.
+ video_encoder_config.video_format.parameters["foo"] = "foo";
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ StatsTracksCpuAdaptationStatsWhenSwitchingSource_Balanced) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ int sequence = 1;
+
+ // Enable BALANCED preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse, should now adapt down.
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+  // Setting a new degradation preference should clear restrictions since we
+  // changed from BALANCED.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Force an input frame rate to be available, or the adaptation call won't
+ // know what framerate to adapt from.
+ VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+ mock_stats.input_frame_rate = 30;
+ stats_proxy_->SetMockStats(mock_stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ stats_proxy_->ResetMockStats();
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+
+ // We have now adapted once.
+ stats = stats_proxy_->GetStats();
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Back to BALANCED, should clear the restrictions again.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &source, webrtc::DegradationPreference::BALANCED);
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ StatsTracksCpuAdaptationStatsWhenSwitchingSource) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ int sequence = 1;
+
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse, should now adapt down.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+  // Switch to cpu adaptation by frame dropping (MAINTAIN_RESOLUTION).
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ // Not adapted at first.
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Force an input frame rate to be available, or the adaptation call won't
+ // know what framerate to adapt from.
+ VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+ mock_stats.input_frame_rate = 30;
+ stats_proxy_->SetMockStats(mock_stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ stats_proxy_->ResetMockStats();
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+
+ // Framerate now adapted.
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Disable CPU adaptation.
+ video_stream_encoder_->SetSource(&new_video_source,
+ webrtc::DegradationPreference::DISABLED);
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Try to trigger overuse. Should not succeed.
+ stats_proxy_->SetMockStats(mock_stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ stats_proxy_->ResetMockStats();
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+  // Switch back to the source with resolution adaptation enabled.
+ video_stream_encoder_->SetSource(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal usage.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(3, stats.number_of_cpu_adapt_changes);
+
+  // Back to the previously disabled source, now with maintain-resolution.
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+  // Resolution adaptation stays off, but the earlier framerate restriction is
+  // restored with the maintain-resolution preference.
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_framerate);
+ EXPECT_EQ(3, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal usage.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(4, stats.number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ScalingUpAndDownDoesNothingWithMaintainResolution) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Expect no scaling to begin with.
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+
+ // Trigger scale down.
+ video_stream_encoder_->TriggerQualityLow();
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+
+ // Expect a scale down.
+ EXPECT_TRUE(video_source_.sink_wants().max_pixel_count);
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+
+  // Disable resolution scaling.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ // Trigger scale down.
+ video_stream_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+
+ // Expect no scaling.
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+
+ // Trigger scale up.
+ video_stream_encoder_->TriggerQualityHigh();
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+
+ // Expect nothing to change, still no scaling.
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ SkipsSameAdaptDownRequest_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerCpuOveruse();
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ const int kLastMaxPixelCount = source.sink_wants().max_pixel_count;
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down for same input resolution, expect no change.
+ video_stream_encoder_->TriggerCpuOveruse();
+ EXPECT_EQ(kLastMaxPixelCount, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(1);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ const int kLastMaxPixelCount = source.sink_wants().max_pixel_count;
+
+ // Trigger adapt down for same input resolution, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(2);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_EQ(kLastMaxPixelCount, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down for larger input resolution, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(3, kWidth + 1, kHeight + 1));
+ sink_.WaitForEncodedFrame(3);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_EQ(kLastMaxPixelCount, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
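+// The source lowers its own resolution via OnOutputFormatRequest() between the
+// adaptation steps below. Those requests do not count as quality adaptations,
+// so only the explicit Trigger* calls move the adapt counter.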
+TEST_F(VideoStreamEncoderTest,
+ FpsCountReturnsToZeroForFewerAdaptationsUpThanDown) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ const int64_t kFrameIntervalMs = 150;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (640x360@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMatchesResolutionMax(Lt(kDefaultFramerate)));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Source requests 270p, expect reduced resolution (480x270@15fps).
+ source.OnOutputFormatRequest(480, 270);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(480, 270);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (480x270@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+  // Source requests 180p, expect reduced resolution (320x180@10fps).
+ source.OnOutputFormatRequest(320, 180);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(320, 180);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (320x180@7fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+  // Source requests 360p, expect increased resolution (640x360@7fps).
+ source.OnOutputFormatRequest(640, 360);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@(max-2)fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@(max-1)fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@maxfps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ FpsCountReturnsToZeroForFewerAdaptationsUpThanDownWithTwoResources) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int64_t kFrameIntervalMs = 150;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (960x540@maxfps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (640x360@maxfps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (640x360@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+  // Source requests 180p, expect reduced resolution (320x180@15fps).
+ source.OnOutputFormatRequest(320, 180);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(320, 180);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (320x180@7fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Source requests HD, expect increased resolution (640x360@7fps).
+ source.OnOutputFormatRequest(1280, 720);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@(max-1)fps).
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@maxfps).
+ video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+  // Trigger adapt up, expect increased resolution (960x540@maxfps).
+ video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect increased resolution (1280x720@maxfps).
+ video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NoChangeForInitialNormalUsage_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NoChangeForInitialNormalUsage_MaintainResolutionMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_RESOLUTION preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_BalancedMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_DisabledMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable DISABLED preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::DISABLED);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionForLowQuality_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsFramerateForLowQuality_MaintainResolutionMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int kInputFps = 30;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ // Expect no scaling to begin with (preference: MAINTAIN_FRAMERATE).
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(1);
+ EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax());
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(2);
+ EXPECT_THAT(video_source_.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+
+ // Enable MAINTAIN_RESOLUTION preference.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(3);
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
+
+ // Trigger adapt down, expect reduced framerate.
+ video_stream_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(4);
+ EXPECT_THAT(new_video_source.sink_wants(),
+ FpsMatchesResolutionMax(Lt(kInputFps)));
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DoesNotScaleBelowSetResolutionLimit) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const size_t kNumFrames = 10;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+  // Enable adaptation. Expected input resolutions when downscaling:
+  // 1280x720 -> 960x540 -> 640x360 -> 480x270 -> 320x180 (kMinPixelsPerFrame).
+ video_source_.set_adaptation_enabled(true);
+
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
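+  // Count only the triggers that actually lower max_pixel_count; once
+  // kMinPixelsPerFrame is reached, further TriggerQualityLow() calls are
+  // expected to be no-ops.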
+ int downscales = 0;
+ for (size_t i = 1; i <= kNumFrames; i++) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(i * kFrameIntervalMs, kWidth, kHeight));
+ WaitForEncodedFrame(i * kFrameIntervalMs);
+
+ // Trigger scale down.
+ rtc::VideoSinkWants last_wants = video_source_.sink_wants();
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_GE(video_source_.sink_wants().max_pixel_count, kMinPixelsPerFrame);
+
+ if (video_source_.sink_wants().max_pixel_count < last_wants.max_pixel_count)
+ ++downscales;
+
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(downscales,
+ stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_GT(downscales, 0);
+ }
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionUpAndDownTwiceOnOveruse_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionUpAndDownTwiceForLowQuality_BalancedMode_NoFpsLimit) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) {
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderBitrateLimits540p, kEncoderBitrateLimits720p});
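+  // With per-resolution bitrate limits configured, an adapt up to a higher
+  // resolution is only granted once the estimate reaches that resolution's
+  // min_start_bitrate_bps.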
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps), 0,
+ 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ // Insert 720p frame.
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(1280, 720);
+
+ // Reduce bitrate and trigger adapt down.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps), 0,
+ 0, 0);
+ video_stream_encoder_->TriggerQualityLow();
+
+ // Insert 720p frame. It should be downscaled and encoded.
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(960, 540);
+
+  // Trigger adapt up. A higher resolution should not be requested due to the
+  // lack of bitrate.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMatches(Lt(1280 * 720)));
+
+ // Increase bitrate.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps), 0,
+ 0, 0);
+
+ // Trigger adapt up. Higher resolution should be requested.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropFirstFramesIfBwEstimateIsTooLow) {
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderBitrateLimits540p, kEncoderBitrateLimits720p});
+
+ // Set bitrate equal to min bitrate of 540p.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps), 0,
+ 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ // Insert 720p frame. It should be dropped and lower resolution should be
+ // requested.
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ ExpectDroppedFrame();
+ EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < 1280 * 720, 5000);
+
+ // Insert 720p frame. It should be downscaled and encoded.
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(960, 540);
+
+ video_stream_encoder_->Stop();
+}
+
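+// Exercises the WebRTC-Video-BalancedDegradationSettings field trial. In the
+// trial strings below, pixels lists the per-level pixel-count thresholds, fps
+// the matching framerate limits, fps_diff the input/requested fps difference
+// thresholds, and kbps/kbps_res the minimum bitrates required to adapt up in
+// framerate and resolution, respectively.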
+class BalancedDegradationTest : public VideoStreamEncoderTest {
+ protected:
+ void SetupTest() {
+ // Reset encoder for field trials to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+ OnBitrateUpdated(kTargetBitrate);
+
+ // Enable BALANCED preference.
+ source_.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source_, DegradationPreference::BALANCED);
+ }
+
+ void OnBitrateUpdated(DataRate bitrate) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ bitrate, bitrate, bitrate, 0, 0, 0);
+ }
+
+ void InsertFrame() {
+ timestamp_ms_ += kFrameIntervalMs;
+ source_.IncomingCapturedFrame(CreateFrame(timestamp_ms_, kWidth, kHeight));
+ }
+
+ void InsertFrameAndWaitForEncoded() {
+ InsertFrame();
+ sink_.WaitForEncodedFrame(timestamp_ms_);
+ }
+
+  const int kWidth = 640;  // 640x360 = 230400 pixels.
+  const int kHeight = 360;
+  const int64_t kFrameIntervalMs = 150;  // Low fps so no frames are dropped.
+ int64_t timestamp_ms_ = 0;
+ AdaptingFrameForwarder source_{&time_controller_};
+};
+
+TEST_F(BalancedDegradationTest, AdaptDownTwiceIfMinFpsDiffLtThreshold) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|10|24,fps_diff:1|1|1/");
+ SetupTest();
+
+ // Force input frame rate.
+ const int kInputFps = 24;
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
+
+  // Trigger adapt down, expect scaled down framerate and resolution, since
+  // the fps diff (input - requested = 0) is less than the threshold.
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_THAT(source_.sink_wants(),
+ AllOf(WantsFps(Eq(24)), WantsMaxPixels(Le(230400))));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(BalancedDegradationTest, AdaptDownOnceIfFpsDiffGeThreshold) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|10|24,fps_diff:1|1|1/");
+ SetupTest();
+
+ // Force input frame rate.
+ const int kInputFps = 25;
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
+
+  // Trigger adapt down, expect scaled down framerate only (640x360@24fps),
+  // since the fps diff (input - requested = 1) equals the threshold.
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(24)));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(BalancedDegradationTest, AdaptDownUsesCodecSpecificFps) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|10|24,vp8_fps:8|11|22/");
+ SetupTest();
+
+ EXPECT_EQ(kVideoCodecVP8, video_encoder_config_.codec_type);
+
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
+
+ // Trigger adapt down, expect scaled down framerate (640x360@22fps).
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(22)));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(BalancedDegradationTest, NoAdaptUpIfBwEstimateIsLessThanMinBitrate) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|10|14,kbps:0|0|425/");
+ SetupTest();
+
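+  // 425 kbps matches the kbps value of the top level in the trial string
+  // above; 424 kbps is just below it.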
+ const DataRate kMinBitrate = DataRate::KilobitsPerSec(425);
+ const DataRate kTooLowMinBitrate = DataRate::KilobitsPerSec(424);
+ OnBitrateUpdated(kTooLowMinBitrate);
+
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down framerate (640x360@14fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14)));
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (480x270@14fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants()));
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down framerate (480x270@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants()));
+ EXPECT_EQ(source_.sink_wants().max_framerate_fps, 10);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no upscale in fps (target bitrate < min bitrate).
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled fps (target bitrate == min bitrate).
+ OnBitrateUpdated(kMinBitrate);
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(source_.sink_wants().max_framerate_fps, 14);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(BalancedDegradationTest,
+ InitialFrameDropAdaptsFpsAndResolutionInOneStep) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|24|24/");
+ SetupTest();
+ OnBitrateUpdated(kLowTargetBitrate);
+
+ EXPECT_THAT(source_.sink_wants(), UnlimitedSinkWants());
+
+  // Insert frame, expect framerate and resolution scaled down in one step:
+  // framerate (640x360@24fps), then resolution (480x270@24fps).
+ InsertFrame();
+ EXPECT_FALSE(WaitForFrame(TimeDelta::Seconds(1)));
+ EXPECT_LT(source_.sink_wants().max_pixel_count, kWidth * kHeight);
+ EXPECT_EQ(source_.sink_wants().max_framerate_fps, 24);
+
+ // Insert frame, expect scaled down:
+ // resolution (320x180@24fps).
+ InsertFrame();
+ EXPECT_FALSE(WaitForFrame(TimeDelta::Seconds(1)));
+ EXPECT_LT(source_.sink_wants().max_pixel_count,
+ source_.last_wants().max_pixel_count);
+ EXPECT_EQ(source_.sink_wants().max_framerate_fps, 24);
+
+ // Frame should not be dropped (min pixels per frame reached).
+ InsertFrameAndWaitForEncoded();
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(BalancedDegradationTest,
+ NoAdaptUpInResolutionIfBwEstimateIsLessThanMinBitrate) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|10|14,kbps_res:0|0|435/");
+ SetupTest();
+
+ const DataRate kResolutionMinBitrate = DataRate::KilobitsPerSec(435);
+ const DataRate kTooLowMinResolutionBitrate = DataRate::KilobitsPerSec(434);
+ OnBitrateUpdated(kTooLowMinResolutionBitrate);
+
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down framerate (640x360@14fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14)));
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (480x270@14fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants()));
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down framerate (480x270@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants()));
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+  // Trigger adapt up, expect upscaled fps since fps has no min bitrate limit
+  // (480x270@14fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsGtResolutionEq(source_.last_wants()));
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no upscale in res (target bitrate < min bitrate).
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled res (target bitrate == min bitrate).
+ OnBitrateUpdated(kResolutionMinBitrate);
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionGt(source_.last_wants()));
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(BalancedDegradationTest,
+ NoAdaptUpInFpsAndResolutionIfBwEstimateIsLessThanMinBitrate) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-BalancedDegradationSettings/"
+ "pixels:57600|129600|230400,fps:7|10|14,kbps:0|0|425,kbps_res:0|0|435/");
+ SetupTest();
+
+ const DataRate kMinBitrate = DataRate::KilobitsPerSec(425);
+ const DataRate kTooLowMinBitrate = DataRate::KilobitsPerSec(424);
+ const DataRate kResolutionMinBitrate = DataRate::KilobitsPerSec(435);
+ const DataRate kTooLowMinResolutionBitrate = DataRate::KilobitsPerSec(434);
+ OnBitrateUpdated(kTooLowMinBitrate);
+
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down framerate (640x360@14fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14)));
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (480x270@14fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants()));
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down framerate (480x270@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants()));
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no upscale (target bitrate < min bitrate).
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled fps (target bitrate == min bitrate).
+ OnBitrateUpdated(kMinBitrate);
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsGtResolutionEq(source_.last_wants()));
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no upscale in res (target bitrate < min bitrate).
+ OnBitrateUpdated(kTooLowMinResolutionBitrate);
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled res (target bitrate == min bitrate).
+ OnBitrateUpdated(kResolutionMinBitrate);
+ video_stream_encoder_->TriggerQualityHigh();
+ InsertFrameAndWaitForEncoded();
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionGt(source_.last_wants()));
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionOnOveruseAndLowQuality_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (960x540).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (640x360).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (480x270).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect scaled down resolution (320x180).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
+ rtc::VideoSinkWants last_wants = source.sink_wants();
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect no change (min resolution reached).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMax());
+ EXPECT_EQ(source.sink_wants().max_pixel_count, last_wants.max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up, expect upscaled resolution (480x270).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality and cpu adapt up since both are most limited, expect
+ // upscaled resolution (640x360).
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality and cpu adapt up since both are most limited, expect
+ // upscaled resolution (960x540).
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ last_wants = source.sink_wants();
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+  // Trigger cpu adapt up, expect no change since CPU is not the most limited
+  // resource (960x540). However, the stats will change since the CPU resource
+  // is no longer limited.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants));
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up, expect no restriction (1280x720).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, CpuLimitedHistogramIsReported) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+ }
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(
+ SendStatisticsProxy::kMinRequiredMetricsSamples + i, kWidth, kHeight));
+ WaitForEncodedFrame(SendStatisticsProxy::kMinRequiredMetricsSamples + i);
+ }
+
+ video_stream_encoder_->Stop();
+ video_stream_encoder_.reset();
+ stats_proxy_.reset();
+
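+  // Half of the samples were collected before the overuse signal and half
+  // after, so the histogram should report 50% cpu-limited resolution.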
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.CpuLimitedResolutionInPercent", 50));
+}
+
+TEST_F(VideoStreamEncoderTest,
+ CpuLimitedHistogramIsNotReportedForDisabledDegradation) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->SetSource(&video_source_,
+ webrtc::DegradationPreference::DISABLED);
+
+ for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+ }
+
+ video_stream_encoder_->Stop();
+ video_stream_encoder_.reset();
+ stats_proxy_.reset();
+
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+}
+
+TEST_F(VideoStreamEncoderTest, ReportsVideoBitrateAllocation) {
+ ResetEncoder("FAKE", 1, 1, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation);
+
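+  // Compute the allocation SimulcastRateAllocator would produce for this
+  // encoder config at the low target bitrate and default fps; the allocation
+  // reported to the sink below must match it exactly.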
+ const int kDefaultFps = 30;
+ const VideoBitrateAllocation expected_bitrate =
+ SimulcastRateAllocator(fake_encoder_.config())
+ .Allocate(VideoBitrateAllocationParameters(kLowTargetBitrate.bps(),
+ kDefaultFps));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.GetLastVideoBitrateAllocation(), expected_bitrate);
+ EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1);
+
+ // Check that encoder has been updated too, not just allocation observer.
+ EXPECT_TRUE(fake_encoder_.GetAndResetLastRateControlSettings().has_value());
+ AdvanceTime(TimeDelta::Seconds(1) / kDefaultFps);
+
+ // VideoBitrateAllocation not updated on second frame.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1);
+ AdvanceTime(TimeDelta::Millis(1) / kDefaultFps);
+
+ // VideoBitrateAllocation updated after a process interval.
+ const int64_t start_time_ms = CurrentTimeMs();
+ while (CurrentTimeMs() - start_time_ms < 5 * kProcessIntervalMs) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ AdvanceTime(TimeDelta::Millis(1) / kDefaultFps);
+ }
+ EXPECT_GT(sink_.number_of_bitrate_allocations(), 3);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, ReportsVideoLayersAllocationForVP8Simulcast) {
+ ResetEncoder("VP8", /*num_streams*/ 2, 1, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ const int kDefaultFps = 30;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+ // kLowTargetBitrate is only enough for one spatial layer.
+ ASSERT_EQ(last_layer_allocation.active_spatial_layers.size(), 1u);
+
+ VideoBitrateAllocation bitrate_allocation =
+ fake_encoder_.GetAndResetLastRateControlSettings()->target_bitrate;
+ // Check that encoder has been updated too, not just allocation observer.
+ EXPECT_EQ(bitrate_allocation.get_sum_bps(), kLowTargetBitrate.bps());
+ AdvanceTime(TimeDelta::Seconds(1) / kDefaultFps);
+
+ // VideoLayersAllocation might be updated if frame rate changes.
+ int number_of_layers_allocation = 1;
+ const int64_t start_time_ms = CurrentTimeMs();
+ while (CurrentTimeMs() - start_time_ms < 10 * kProcessIntervalMs) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ if (number_of_layers_allocation != sink_.number_of_layers_allocations()) {
+ number_of_layers_allocation = sink_.number_of_layers_allocations();
+ VideoLayersAllocation new_allocation =
+ sink_.GetLastVideoLayersAllocation();
+ ASSERT_EQ(new_allocation.active_spatial_layers.size(), 1u);
+ EXPECT_NE(new_allocation.active_spatial_layers[0].frame_rate_fps,
+ last_layer_allocation.active_spatial_layers[0].frame_rate_fps);
+ EXPECT_EQ(new_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer);
+ last_layer_allocation = new_allocation;
+ }
+ }
+ EXPECT_LE(sink_.number_of_layers_allocations(), 3);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForVP8WithMiddleLayerDisabled) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP8,
+ /* num_streams*/ 3, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp8EncoderSpecificSettings>(
+ VideoEncoder::GetDefaultVp8Settings());
+ for (auto& layer : video_encoder_config.simulcast_layers) {
+ layer.num_temporal_layers = 2;
+ }
+ // Simulcast layers are used for enabling/disabling streams.
+ video_encoder_config.simulcast_layers[0].active = true;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = true;
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_LT(last_layer_allocation.active_spatial_layers[0].width, 1280);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 1280);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForVP8WithMiddleAndHighestLayerDisabled) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP8,
+ /* num_streams*/ 3, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp8EncoderSpecificSettings>(
+ VideoEncoder::GetDefaultVp8Settings());
+ for (auto& layer : video_encoder_config.simulcast_layers) {
+ layer.num_temporal_layers = 2;
+ }
+ // Simulcast layers are used for enabling/disabling streams.
+ video_encoder_config.simulcast_layers[0].active = true;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = false;
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(1));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_LT(last_layer_allocation.active_spatial_layers[0].width, 1280);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForV9SvcWithTemporalLayerSupport) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /* num_streams*/ 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 2;
+ vp9_settings.numberOfTemporalLayers = 2;
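+  // InterLayerPredMode::kOn gives full SVC: upper spatial layers predict from
+  // the lower ones on every frame, so the top layer carries the full target
+  // rate (asserted below).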
+ vp9_settings.interLayerPred = InterLayerPredMode::kOn;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 640);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].height, 360);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].frame_rate_fps, 30);
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 1280);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].height, 720);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].frame_rate_fps, 30);
+
+ // Since full SVC is used, expect the top layer to utilize the full target
+ // rate.
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer[1],
+ kTargetBitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForV9SvcWithoutTemporalLayerSupport) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, false);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, false);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /* num_streams*/ 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 2;
+ vp9_settings.numberOfTemporalLayers = 2;
+ vp9_settings.interLayerPred = InterLayerPredMode::kOn;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(1));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(1));
+ // Since full SVC is used, expect the top layer to utilize the full target
+ // rate.
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer[0],
+ kTargetBitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForVP9KSvcWithTemporalLayerSupport) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /* num_streams*/ 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 2;
+ vp9_settings.numberOfTemporalLayers = 2;
+ vp9_settings.interLayerPred = InterLayerPredMode::kOnKeyPic;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+  // Since K-SVC is used, spatial layers are independent except on key frames.
+ EXPECT_LT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer[1],
+ kTargetBitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForV9SvcWithLowestLayerDisabled) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /* num_streams*/ 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+ vp9_settings.numberOfTemporalLayers = 2;
+ vp9_settings.interLayerPred = InterLayerPredMode::kOn;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ // Simulcast layers are used for enabling/disabling streams.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = true;
+ video_encoder_config.simulcast_layers[2].active = true;
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 640);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].spatial_id, 0);
+
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 1280);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].spatial_id, 1);
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ // Since full SVC is used, expect the top layer to utilize the full target
+ // rate.
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer[1],
+ kTargetBitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForV9SvcWithHighestLayerDisabled) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /* num_streams*/ 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+ vp9_settings.numberOfTemporalLayers = 2;
+ vp9_settings.interLayerPred = InterLayerPredMode::kOn;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ // Simulcast layers are used for enabling/disabling streams.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[2].active = false;
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 320);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].spatial_id, 0);
+
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 640);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].spatial_id, 1);
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsVideoLayersAllocationForV9SvcWithAllButHighestLayerDisabled) {
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /* num_streams*/ 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+ vp9_settings.numberOfTemporalLayers = 2;
+ vp9_settings.interLayerPred = InterLayerPredMode::kOn;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ // Simulcast layers are used for enabling/disabling streams.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = true;
+ ConfigureEncoder(std::move(video_encoder_config),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(1));
+ EXPECT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(2));
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 1280);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].spatial_id, 0);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer[1],
+ kTargetBitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, ReportsVideoLayersAllocationForH264) {
+ ResetEncoder("H264", 1, 1, 1, false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(1));
+ ASSERT_THAT(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer,
+ SizeIs(1));
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer[0],
+ kTargetBitrate);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 1280);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].height, 720);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].frame_rate_fps, 30);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsUpdatedVideoLayersAllocationWhenBweChanges) {
+ ResetEncoder("VP8", /*num_streams*/ 2, 1, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ VideoLayersAllocation last_layer_allocation =
+ sink_.GetLastVideoLayersAllocation();
+ // kLowTargetBitrate is only enough for one spatial layer.
+ ASSERT_EQ(last_layer_allocation.active_spatial_layers.size(), 1u);
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[0]
+ .target_bitrate_per_temporal_layer[0],
+ kLowTargetBitrate);
+
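+  // Raise the estimate to the full simulcast target; this activates the
+  // second stream and should trigger a fresh VideoLayersAllocation report.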
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 2);
+ last_layer_allocation = sink_.GetLastVideoLayersAllocation();
+ ASSERT_EQ(last_layer_allocation.active_spatial_layers.size(), 2u);
+ EXPECT_GT(last_layer_allocation.active_spatial_layers[1]
+ .target_bitrate_per_temporal_layer[0],
+ DataRate::Zero());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ReportsUpdatedVideoLayersAllocationWhenResolutionChanges) {
+ ResetEncoder("VP8", /*num_streams*/ 2, 1, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_, codec_height_));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 1);
+ ASSERT_THAT(sink_.GetLastVideoLayersAllocation().active_spatial_layers,
+ SizeIs(2));
+ EXPECT_EQ(sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].width,
+ codec_width_);
+ EXPECT_EQ(
+ sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].height,
+ codec_height_);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(CurrentTimeMs(), codec_width_ / 2, codec_height_ / 2));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(sink_.number_of_layers_allocations(), 2);
+ ASSERT_THAT(sink_.GetLastVideoLayersAllocation().active_spatial_layers,
+ SizeIs(2));
+ EXPECT_EQ(sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].width,
+ codec_width_ / 2);
+ EXPECT_EQ(
+ sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].height,
+ codec_height_ / 2);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, TemporalLayersNotDisabledIfSupported) {
+ // 2 TLs configured, temporal layers supported by encoder.
+ const int kNumTemporalLayers = 2;
+ ResetEncoder("VP8", 1, kNumTemporalLayers, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation);
+ fake_encoder_.SetTemporalLayersSupported(0, true);
+
+ // Bitrate allocated across temporal layers.
+ const int kTl0Bps = kTargetBitrate.bps() *
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ kNumTemporalLayers, /*temporal_id*/ 0,
+ /*base_heavy_tl3_alloc*/ false);
+ const int kTl1Bps = kTargetBitrate.bps() *
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ kNumTemporalLayers, /*temporal_id*/ 1,
+ /*base_heavy_tl3_alloc*/ false);
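+  // GetTemporalRateAllocation() returns cumulative rates, so TL1's own share
+  // is the difference between the TL1 and TL0 allocations.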
+ VideoBitrateAllocation expected_bitrate;
+ expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kTl0Bps);
+ expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 1, kTl1Bps - kTl0Bps);
+
+ VerifyAllocatedBitrate(expected_bitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, TemporalLayersDisabledIfNotSupported) {
+ // 2 TLs configured, temporal layers not supported by encoder.
+ ResetEncoder("VP8", 1, /*num_temporal_layers*/ 2, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation);
+ fake_encoder_.SetTemporalLayersSupported(0, false);
+
+ // Temporal layers not supported by the encoder.
+  // The total bitrate should be allocated to ti:0.
+ VideoBitrateAllocation expected_bitrate;
+ expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kTargetBitrate.bps());
+
+ VerifyAllocatedBitrate(expected_bitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, VerifyBitrateAllocationForTwoStreams) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-QualityScalerSettings/"
+ "initial_bitrate_interval_ms:1000,initial_bitrate_factor:0.2/");
+ // Reset encoder for field trials to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ // 2 TLs configured, temporal layers only supported for first stream.
+ ResetEncoder("VP8", 2, /*num_temporal_layers*/ 2, 1, /*screenshare*/ false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation);
+ fake_encoder_.SetTemporalLayersSupported(0, true);
+ fake_encoder_.SetTemporalLayersSupported(1, false);
+
+ const int kS0Bps = 150000;
+ const int kS0Tl0Bps =
+ kS0Bps *
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ /*num_layers*/ 2, /*temporal_id*/ 0, /*base_heavy_tl3_alloc*/ false);
+ const int kS0Tl1Bps =
+ kS0Bps *
+ webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
+ /*num_layers*/ 2, /*temporal_id*/ 1, /*base_heavy_tl3_alloc*/ false);
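+  // Stream 1 gets whatever remains of the target rate after stream 0's
+  // (cumulative) TL1 allocation.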
+ const int kS1Bps = kTargetBitrate.bps() - kS0Tl1Bps;
+ // Temporal layers not supported by si:1.
+ VideoBitrateAllocation expected_bitrate;
+ expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kS0Tl0Bps);
+ expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 1, kS0Tl1Bps - kS0Tl0Bps);
+ expected_bitrate.SetBitrate(/*si*/ 1, /*ti*/ 0, kS1Bps);
+
+ VerifyAllocatedBitrate(expected_bitrate);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, OveruseDetectorUpdatedOnReconfigureAndAdaption) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kFramerate = 24;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ // Insert a single frame, triggering initial configuration.
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
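+  // overuse_detector_proxy_ records the target framerate most recently passed
+  // to the underlying overuse detector, letting the test observe
+  // CPU-adaptation updates.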
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kDefaultFramerate);
+
+ // Trigger reconfigure encoder (without resetting the entire instance).
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.simulcast_layers[0].max_framerate = kFramerate;
+ video_encoder_config.max_bitrate_bps = kTargetBitrate.bps();
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Detector should be updated with fps limit from codec config.
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ // Trigger overuse, max framerate should be reduced.
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kFramerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ int adapted_framerate =
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate();
+ EXPECT_LT(adapted_framerate, kFramerate);
+
+  // Trigger underuse, max framerate should go back to the codec-configured
+  // fps. Set an extra low fps to make sure it's actually reset, not just
+  // incremented.
+ stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = adapted_framerate / 2;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ OveruseDetectorUpdatedRespectsFramerateAfterUnderuse) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kLowFramerate = 15;
+ const int kHighFramerate = 25;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ // Trigger initial configuration.
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.simulcast_layers[0].max_framerate = kLowFramerate;
+ video_encoder_config.max_bitrate_bps = kTargetBitrate.bps();
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kLowFramerate);
+
+ // Trigger overuse, max framerate should be reduced.
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kLowFramerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ int adapted_framerate =
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate();
+ EXPECT_LT(adapted_framerate, kLowFramerate);
+
+  // Reconfigure the encoder with a new (higher) max framerate; the max fps
+  // should still respect the adaptation.
+ video_encoder_config.simulcast_layers[0].max_framerate = kHighFramerate;
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ adapted_framerate);
+
+ // Trigger underuse, max framerate should go back to codec configured fps.
+ stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = adapted_framerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kHighFramerate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ OveruseDetectorUpdatedOnDegradationPreferenceChange) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kFramerate = 24;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ // Trigger initial configuration.
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.simulcast_layers[0].max_framerate = kFramerate;
+ video_encoder_config.max_bitrate_bps = kTargetBitrate.bps();
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ // Trigger overuse, max framerate should be reduced.
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kFramerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ int adapted_framerate =
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate();
+ EXPECT_LT(adapted_framerate, kFramerate);
+
+  // Change the degradation preference to one that disables framerate scaling.
+  // The target framerate should revert to the codec-defined limit.
+ video_stream_encoder_->SetSourceAndWaitForFramerateUpdated(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesAndScalesWhenBitrateIsTooLow) {
+ const int kTooLowBitrateForFrameSizeBps = 10000;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+
+  // Expect this frame to be dropped; the wait should time out.
+ ExpectDroppedFrame();
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
+
+ int last_pixel_count = video_source_.sink_wants().max_pixel_count;
+
+ // Next frame is scaled.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, kWidth * 3 / 4, kHeight * 3 / 4));
+
+  // Expect this frame to be dropped; the wait should time out.
+ ExpectDroppedFrame();
+
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < last_pixel_count, 5000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NumberOfDroppedFramesLimitedWhenBitrateIsTooLow) {
+ const int kTooLowBitrateForFrameSizeBps = 10000;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ // We expect the n initial frames to get dropped.
+ int i;
+ for (i = 1; i <= kMaxInitialFramedrop; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ ExpectDroppedFrame();
+ }
+  // The (n+1)th frame should not be dropped, even though its size is too
+  // large.
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ InitialFrameDropOffWithMaintainResolutionPreference) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+
+ // Set degradation preference.
+ video_stream_encoder_->SetSource(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped, even if it's too large.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, InitialFrameDropOffWhenEncoderDisabledScaling) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ fake_encoder_.SetQualityScaling(false);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+  // Make the format different to force recreation of the encoder.
+ video_encoder_config.video_format.parameters["foo"] = "foo";
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+
+ // Force quality scaler reconfiguration by resetting the source.
+ video_stream_encoder_->SetSource(&video_source_,
+ webrtc::DegradationPreference::BALANCED);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped, even if it's too large.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->Stop();
+ fake_encoder_.SetQualityScaling(true);
+}
+
+TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenBweDrops) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-QualityScalerSettings/"
+ "initial_bitrate_interval_ms:1000,initial_bitrate_factor:0.2/");
+ // Reset encoder for field trials to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
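+  // With initial_bitrate_factor:0.2, an estimate at 0.2x the target passes
+  // the initial frame-drop check while 0.19x falls just below the threshold.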
+ const int kNotTooLowBitrateForFrameSizeBps = kTargetBitrate.bps() * 0.2;
+ const int kTooLowBitrateForFrameSizeBps = kTargetBitrate.bps() * 0.19;
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps), 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(2);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+  // Expect this frame to be dropped; the wait should time out.
+ ExpectDroppedFrame();
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ InitialFrameDropNotReactivatedWhenBweDropsWhenScalingDisabled) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-QualityScalerSettings/"
+ "initial_bitrate_interval_ms:1000,initial_bitrate_factor:0.2/");
+ fake_encoder_.SetQualityScaling(false);
+ ConfigureEncoder(video_encoder_config_.Copy());
+ const int kNotTooLowBitrateForFrameSizeBps = kTargetBitrate.bps() * 0.2;
+ const int kTooLowBitrateForFrameSizeBps = kTargetBitrate.bps() * 0.19;
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps), 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(2);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ // Not dropped since quality scaling is disabled.
+ WaitForEncodedFrame(3);
+
+ // Expect the sink_wants to specify a scaled frame.
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(video_source_.sink_wants(), ResolutionMax());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenLayersChange) {
+ const DataRate kLowTargetBitrate = DataRate::KilobitsPerSec(400);
+ // Set simulcast.
+ ResetEncoder("VP8", 3, 1, 1, false);
+ fake_encoder_.SetQualityScaling(true);
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(1);
+
+ // Trigger QVGA "singlecast"
+ // Update the config.
+ VideoEncoderConfig video_encoder_config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ test::FillEncoderConfiguration(PayloadStringToCodecType("VP8"), 3,
+ &video_encoder_config);
+ video_encoder_config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ for (auto& layer : video_encoder_config.simulcast_layers) {
+ layer.num_temporal_layers = 1;
+ layer.max_framerate = kDefaultFramerate;
+ }
+ video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+
+ video_encoder_config.simulcast_layers[0].active = true;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = false;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(2);
+
+ // Trigger HD "singlecast"
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = true;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+  // Frame should be dropped by the initial frame-drop logic: kLowTargetBitrate
+  // was enough for QVGA but is below the threshold for a 720p stream.
+ ExpectDroppedFrame();
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenSVCLayersChange) {
+ const DataRate kLowTargetBitrate = DataRate::KilobitsPerSec(400);
+  // Set up SVC (one stream, three spatial layers).
+ ResetEncoder("VP9", 1, 1, 3, false);
+ fake_encoder_.SetQualityScaling(true);
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(1);
+
+ // Trigger QVGA "singlecast"
+ // Update the config.
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1,
+ &video_encoder_config);
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+  // Since only one layer is active, automatic resize should be enabled.
+ vp9_settings.automaticResizeOn = true;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+  // Currently the simulcast layers' `active` flags are used to indicate
+  // which SVC layers are active.
+ video_encoder_config.simulcast_layers.resize(3);
+
+ video_encoder_config.simulcast_layers[0].active = true;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = false;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(2);
+
+ // Trigger HD "singlecast"
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = true;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ // Frame should be dropped because of initial frame drop.
+ ExpectDroppedFrame();
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderMaxAndMinBitratesUsedIfMiddleStreamActive) {
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p(
+ 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p(
+ 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p(
+ 1280 * 720, 54 * 1000, 31 * 1000, 2500 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderLimits270p, kEncoderLimits360p, kEncoderLimits720p});
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1,
+ &video_encoder_config);
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+  // Since only one layer is active, automatic resize should be enabled.
+ vp9_settings.automaticResizeOn = true;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ // Simulcast layers are used to indicate which spatial layers are active.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = true;
+ video_encoder_config.simulcast_layers[2].active = false;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ // The encoder bitrate limits for 360p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, VideoCodecType::kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 2);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(640, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(360, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.min_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits360p.max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ // The encoder bitrate limits for 270p should be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 960, 540));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, VideoCodecType::kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 2);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(480, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(270, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.min_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kEncoderLimits270p.max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
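+
+// A sketch of the layer geometry assumed by the test above: with three VP9
+// spatial layers, each layer is half the width and height of the one above
+// it, so a 1280x720 input maps to 320x180 / 640x360 / 1280x720 and a 960x540
+// input to 240x135 / 480x270 / 960x540. With only the middle layer active,
+// the encode therefore lands at 640x360 (or 480x270), which is why the
+// matching per-resolution encoder limits are expected on spatialLayers[0].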
+
+TEST_F(VideoStreamEncoderTest,
+ DefaultMaxAndMinBitratesUsedIfMiddleStreamActive) {
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1,
+ &video_encoder_config);
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+ // Since only one layer is active, automatic resize should be enabled.
+ vp9_settings.automaticResizeOn = true;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ // Simulcast layers are used to indicate which spatial layers are active.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = true;
+ video_encoder_config.simulcast_layers[2].active = false;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ // The default bitrate limits for 360p should be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits> kLimits360p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP9, 640 * 360);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, VideoCodecType::kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 2);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(640, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(360, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_EQ(static_cast<uint32_t>(kLimits360p->min_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kLimits360p->max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ // The default bitrate limits for 270p should be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits> kLimits270p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP9, 480 * 270);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 960, 540));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, VideoCodecType::kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 2);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(480, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(270, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_EQ(static_cast<uint32_t>(kLimits270p->min_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].minBitrate * 1000);
+ EXPECT_EQ(static_cast<uint32_t>(kLimits270p->max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DefaultMaxAndMinBitratesNotUsedIfDisabled) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-DefaultBitrateLimitsKillSwitch/Enabled/");
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1,
+ &video_encoder_config);
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+ // Since only one layer is active, automatic resize should be enabled.
+ vp9_settings.automaticResizeOn = true;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ // Simulcast layers are used to indicate which spatial layers are active.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[0].active = false;
+ video_encoder_config.simulcast_layers[1].active = true;
+ video_encoder_config.simulcast_layers[2].active = false;
+
+ // Reset encoder for field trials to take effect.
+ ConfigureEncoder(video_encoder_config.Copy());
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ // The default bitrate limits for 360p should not be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits> kLimits360p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP9, 640 * 360);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 2);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(640, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(360, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_NE(static_cast<uint32_t>(kLimits360p->max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
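+
+// WebRTC field trials in these tests are plain "Trial-Name/Group/" strings
+// handed to ScopedKeyValueConfig. "Enabled" here flips the kill switch, so
+// the default singlecast limits looked up via
+// GetDefaultSinglecastBitrateLimitsForResolution() are ignored, which the
+// EXPECT_NE on maxBitrate verifies.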
+
+TEST_F(VideoStreamEncoderTest, SinglecastBitrateLimitsNotUsedForOneStream) {
+ ResetEncoder("VP9", /*num_streams=*/1, /*num_temporal_layers=*/1,
+ /*num_spatial_layers=*/1, /*screenshare=*/false);
+
+ // The default singlecast bitrate limits for 720p should not be used.
+ const absl::optional<VideoEncoder::ResolutionBitrateLimits> kLimits720p =
+ EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution(
+ kVideoCodecVP9, 1280 * 720);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, VideoCodecType::kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 1);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(1280, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(720, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_NE(static_cast<uint32_t>(kLimits720p->max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ EncoderMaxAndMinBitratesNotUsedIfLowestStreamActive) {
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits180p(
+ 320 * 180, 34 * 1000, 12 * 1000, 1234 * 1000);
+ const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p(
+ 1280 * 720, 54 * 1000, 31 * 1000, 2500 * 1000);
+ fake_encoder_.SetResolutionBitrateLimits(
+ {kEncoderLimits180p, kEncoderLimits720p});
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1,
+ &video_encoder_config);
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 3;
+ // Since only one layer is active, automatic resize should be enabled.
+ vp9_settings.automaticResizeOn = true;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ // Simulcast layers are used to indicate which spatial layers are active.
+ video_encoder_config.simulcast_layers.resize(3);
+ video_encoder_config.simulcast_layers[0].active = true;
+ video_encoder_config.simulcast_layers[1].active = false;
+ video_encoder_config.simulcast_layers[2].active = false;
+
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ // Limits not applied on lowest stream, limits for 180p should not be used.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(fake_encoder_.config().numberOfSimulcastStreams, 1);
+ EXPECT_EQ(fake_encoder_.config().codecType, VideoCodecType::kVideoCodecVP9);
+ EXPECT_EQ(fake_encoder_.config().VP9().numberOfSpatialLayers, 3);
+ EXPECT_TRUE(fake_encoder_.config().spatialLayers[0].active);
+ EXPECT_EQ(320, fake_encoder_.config().spatialLayers[0].width);
+ EXPECT_EQ(180, fake_encoder_.config().spatialLayers[0].height);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits180p.min_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].minBitrate * 1000);
+ EXPECT_NE(static_cast<uint32_t>(kEncoderLimits180p.max_bitrate_bps),
+ fake_encoder_.config().spatialLayers[0].maxBitrate * 1000);
+
+ video_stream_encoder_->Stop();
+}
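+
+// Note on the test above: with only the bottom layer active, the config keeps
+// all three spatial layers (see the numberOfSpatialLayers == 3 check), so the
+// stream is evidently not treated as singlecast and the per-resolution
+// encoder limits for 180p are not applied to the base layer.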
+
+TEST_F(VideoStreamEncoderTest,
+ InitialFrameDropActivatesWhenResolutionIncreases) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth / 2, kHeight / 2));
+ // Frame should not be dropped.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth / 2, kHeight / 2));
+ // Frame should not be dropped, bitrate not too low for frame.
+ WaitForEncodedFrame(2);
+
+ // Incoming resolution increases.
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ // Expect to drop this frame, bitrate too low for frame.
+ ExpectDroppedFrame();
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, InitialFrameDropIsNotReactivatedWhenAdaptingUp) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ // So that quality scaling doesn't happen by itself.
+ fake_encoder_.SetQp(kQpHigh);
+
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ int timestamp = 1;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp);
+ timestamp += 9000;
+ // Long pause to disable the initial BWE-based frame-drop logic.
+ AdvanceTime(TimeDelta::Millis(1000));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+ source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight));
+ // Frame not dropped, as initial frame drop is disabled by now.
+ WaitForEncodedFrame(timestamp);
+ timestamp += 9000;
+ AdvanceTime(TimeDelta::Millis(100));
+
+ // Quality adaptation down.
+ video_stream_encoder_->TriggerQualityLow();
+
+ // Adaptation has an effect.
+ EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < kWidth * kHeight,
+ 5000);
+
+ // Frame isn't dropped as initial frame dropper is disabled.
+ source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp);
+ timestamp += 9000;
+ AdvanceTime(TimeDelta::Millis(100));
+
+ // Quality adaptation up.
+ video_stream_encoder_->TriggerQualityHigh();
+
+ // Adaptation has an effect.
+ EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count > kWidth * kHeight,
+ 5000);
+
+ source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight));
+ // Frame should not be dropped, as initial framedropper is off.
+ WaitForEncodedFrame(timestamp);
+
+ video_stream_encoder_->Stop();
+}
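+
+// The test above leans on the initial frame dropper being a start-of-stream
+// guard only: after the long pause disables it, neither the low bitrate nor
+// the later quality adaptations re-arm it, so no further frame is dropped.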
+
+TEST_F(VideoStreamEncoderTest,
+ FrameDroppedWhenResolutionIncreasesAndLinkAllocationIsLow) {
+ const int kMinStartBps360p = 222000;
+ fake_encoder_.SetResolutionBitrateLimits(
+ {VideoEncoder::ResolutionBitrateLimits(320 * 180, 0, 30000, 400000),
+ VideoEncoder::ResolutionBitrateLimits(640 * 360, kMinStartBps360p, 30000,
+ 800000)});
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kMinStartBps360p - 1), // target_bitrate
+ DataRate::BitsPerSec(kMinStartBps360p - 1), // stable_target_bitrate
+ DataRate::BitsPerSec(kMinStartBps360p - 1), // link_allocation
+ 0, 0, 0);
+ // Frame should not be dropped, bitrate not too low for frame.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 320, 180));
+ WaitForEncodedFrame(1);
+
+ // Incoming resolution increases, initial frame drop activates.
+ // Frame should be dropped, link allocation too low for frame.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ ExpectDroppedFrame();
+
+ // Expect sink_wants to specify a scaled frame.
+ EXPECT_TRUE_WAIT(video_source_.sink_wants().max_pixel_count < 640 * 360,
+ 5000);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ FrameNotDroppedWhenResolutionIncreasesAndLinkAllocationIsHigh) {
+ const int kMinStartBps360p = 222000;
+ fake_encoder_.SetResolutionBitrateLimits(
+ {VideoEncoder::ResolutionBitrateLimits(320 * 180, 0, 30000, 400000),
+ VideoEncoder::ResolutionBitrateLimits(640 * 360, kMinStartBps360p, 30000,
+ 800000)});
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(kMinStartBps360p - 1), // target_bitrate
+ DataRate::BitsPerSec(kMinStartBps360p - 1), // stable_target_bitrate
+ DataRate::BitsPerSec(kMinStartBps360p), // link_allocation
+ 0, 0, 0);
+ // Frame should not be dropped, bitrate not too low for frame.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 320, 180));
+ WaitForEncodedFrame(1);
+
+ // Incoming resolution increases, initial frame drop activates.
+ // Frame should not be dropped, link allocation not too low for frame.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
+ WaitForEncodedFrame(2);
+
+ video_stream_encoder_->Stop();
+}
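+
+// Rough shape of the gating exercised by the two tests above, assuming the
+// initial frame dropper compares the link allocation against the new
+// resolution's min_start_bitrate_bps:
+//
+//   if (link_allocation < min_start_bitrate(new_resolution))
+//     drop the frame and lower sink_wants().max_pixel_count;
+//
+// With kMinStartBps360p = 222000, an allocation of 221999 bps drops the
+// 640x360 frame, while exactly 222000 bps lets it through.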
+
+TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-Video-QualityRampupSettings/"
+ "min_pixels:921600,min_duration_ms:2000/");
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int kFps = 10;
+ max_framerate_ = kFps;
+
+ // Reset encoder for field trials to take effect.
+ VideoEncoderConfig config = video_encoder_config_.Copy();
+ config.max_bitrate_bps = kTargetBitrate.bps();
+ DataRate max_bitrate = DataRate::BitsPerSec(config.max_bitrate_bps);
+ ConfigureEncoder(std::move(config));
+ fake_encoder_.SetQp(kQpLow);
+
+ // Enable MAINTAIN_FRAMERATE preference.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ DegradationPreference::MAINTAIN_FRAMERATE);
+
+ // Start at low bitrate.
+ const DataRate kLowBitrate = DataRate::KilobitsPerSec(200);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kLowBitrate, kLowBitrate, kLowBitrate, 0, 0, 0);
+
+ // Expect first frame to be dropped and resolution to be limited.
+ const int64_t kFrameIntervalMs = 1000 / kFps;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ ExpectDroppedFrame();
+ EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < kWidth * kHeight,
+ 5000);
+
+ // Increase bitrate to encoder max.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ max_bitrate, max_bitrate, max_bitrate, 0, 0, 0);
+
+ // Insert frames and advance `min_duration_ms`.
+ const int64_t start_bw_high_ms = CurrentTimeMs();
+ for (size_t i = 1; i <= 10; i++) {
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ }
+
+ // Advance to `min_duration_ms` - 1, frame should not trigger high BW.
+ int64_t elapsed_bw_high_ms = CurrentTimeMs() - start_bw_high_ms;
+ AdvanceTime(TimeDelta::Millis(2000 - elapsed_bw_high_ms - 1));
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_LT(source.sink_wants().max_pixel_count, kWidth * kHeight);
+
+ // Frame should trigger high BW and release quality limitation.
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ // The ramp-up code involves the adaptation queue, give it time to execute.
+ // TODO(hbos): Can we await an appropriate event instead?
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+
+ // Frame should not be adapted.
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
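+
+// Decoding the trial string above: min_pixels:921600 is 1280 * 720, the
+// resolution the quality ramp-up may restore, and min_duration_ms:2000 is
+// how long the bitrate must sit at the encoder max before the limitation is
+// released. That is why the frame sent at `min_duration_ms` - 1 is still
+// resolution-limited and the next one is not.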
+
+TEST_F(VideoStreamEncoderTest,
+ QualityScalerAdaptationsRemovedWhenQualityScalingDisabled) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Video-QualityScaling/Disabled/");
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ fake_encoder_.SetQp(kQpHigh + 1);
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int64_t kFrameIntervalMs = 100;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ for (size_t i = 1; i <= 100; i++) {
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ }
+ // Wait for QualityScaler, which will wait for 2000*2.5 ms until checking QP
+ // for the first time.
+ // TODO(eshr): We should avoid these waits by using threads with simulated
+ // time.
+ EXPECT_TRUE_WAIT(stats_proxy_->GetStats().bw_limited_resolution,
+ 2000 * 2.5 * 2);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(source.sink_wants(), WantsMaxPixels(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ // Disable Quality scaling by turning off scaler on the encoder and
+ // reconfiguring.
+ fake_encoder_.SetQualityScaling(false);
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config_.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ AdvanceTime(TimeDelta::Zero());
+ // Since we turned off the quality scaler, the adaptations made by it are
+ // removed.
+ EXPECT_THAT(source.sink_wants(), ResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
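+
+// QP-driven scaling in the test above: the fake encoder reports QP above
+// kQpHigh, so the QualityScaler keeps requesting lower resolution. Turning
+// quality scaling off in the encoder info and reconfiguring then removes the
+// restrictions that scaler had introduced, evidently because adaptations are
+// tied to the resource that requested them.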
+
+TEST_F(VideoStreamEncoderTest,
+ ResolutionNotAdaptedForTooSmallFrame_MaintainFramerateMode) {
+ const int kTooSmallWidth = 10;
+ const int kTooSmallHeight = 10;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable MAINTAIN_FRAMERATE preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+
+ // Trigger adapt down, too small frame, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerCpuOveruse();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ResolutionNotAdaptedForTooSmallFrame_BalancedMode) {
+ const int kTooSmallWidth = 10;
+ const int kTooSmallHeight = 10;
+ const int kFpsLimit = 7;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+
+ // Trigger adapt down, expect limited framerate.
+ source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, too small frame, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(2, kTooSmallWidth, kTooSmallHeight));
+ WaitForEncodedFrame(2);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
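+
+// Both "too small frame" tests above rely on the same guard: a frame at or
+// below the minimum working resolution is never downscaled further. In
+// BALANCED mode the adapter can still trade framerate (down to kFpsLimit),
+// which is why the first trigger limits fps and the second changes nothing.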
+
+TEST_F(VideoStreamEncoderTest, FailingInitEncodeDoesntCauseCrash) {
+ fake_encoder_.ForceInitEncodeFailure(true);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ ResetEncoder("VP8", 2, 1, 1, false);
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ ExpectDroppedFrame();
+ video_stream_encoder_->Stop();
+}
+
+// TODO(sprang): Extend this with fps throttling and any "balanced" extensions.
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionOnOveruse_MaintainFramerateMode) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ // Enable default VideoAdapter downscaling. First step is 3/4, not 3/5 as
+ // requested by
+ // VideoStreamEncoder::VideoSourceProxy::RequestResolutionLowerThan().
+ video_source_.set_adaptation_enabled(true);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1 * kFrameIntervalMs, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kFrameWidth, kFrameHeight);
+
+ // Trigger CPU overuse, downscale by 3/4.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2 * kFrameIntervalMs, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame((kFrameWidth * 3) / 4, (kFrameHeight * 3) / 4);
+
+ // Trigger CPU normal use, return to original resolution.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(3 * kFrameIntervalMs, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kFrameWidth, kFrameHeight);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsFramerateOnOveruse_MaintainResolutionMode) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->SetSource(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ video_source_.set_adaptation_enabled(true);
+
+ int64_t timestamp_ms = CurrentTimeMs();
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Try to trigger overuse. No fps estimate available => no effect.
+ video_stream_encoder_->TriggerCpuOveruse();
+
+ // Insert frames for one second to get a stable estimate.
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ }
+
+ // Trigger CPU overuse, reduce framerate by 2/3.
+ video_stream_encoder_->TriggerCpuOveruse();
+ int num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeout)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMatches(kFrameWidth, kFrameHeight);
+ }
+ }
+
+ // Add some slack to account for frames dropped by the frame dropper.
+ const int kErrorMargin = 1;
+ EXPECT_NEAR(num_frames_dropped, max_framerate_ - (max_framerate_ * 2 / 3),
+ kErrorMargin);
+
+ // Trigger CPU overuse, reduce framerate by 2/3 again.
+ video_stream_encoder_->TriggerCpuOveruse();
+ num_frames_dropped = 0;
+ for (int i = 0; i <= max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeout)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMatches(kFrameWidth, kFrameHeight);
+ }
+ }
+ EXPECT_NEAR(num_frames_dropped, max_framerate_ - (max_framerate_ * 4 / 9),
+ kErrorMargin);
+
+ // Go back up one step.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeout)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMatches(kFrameWidth, kFrameHeight);
+ }
+ }
+ EXPECT_NEAR(num_frames_dropped, max_framerate_ - (max_framerate_ * 2 / 3),
+ kErrorMargin);
+
+ // Go back up to original mode.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeout)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMatches(kFrameWidth, kFrameHeight);
+ }
+ }
+ EXPECT_NEAR(num_frames_dropped, 0, kErrorMargin);
+
+ video_stream_encoder_->Stop();
+}
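+
+// Worked numbers for the framerate steps above, assuming max_framerate_ is
+// 30 fps and integer arithmetic as in the EXPECT_NEAR expressions:
+//   one step down:  30 * 2 / 3 = 20 fps -> expect ~10 of 30 frames dropped;
+//   two steps down: 30 * 4 / 9 = 13 fps -> expect ~17 of 30 frames dropped;
+// each CPU underuse trigger then walks the same ladder back up.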
+
+TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) {
+ const int kFramerateFps = 5;
+ const int kFrameIntervalMs = rtc::kNumMillisecsPerSec / kFramerateFps;
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ // Reconfigure encoder with two temporal layers and screensharing, which will
+ // disable frame dropping and make testing easier.
+ ResetEncoder("VP8", 1, 2, 1, true);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->SetSource(
+ &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ video_source_.set_adaptation_enabled(true);
+
+ int64_t timestamp_ms = CurrentTimeMs();
+
+ // Trigger overuse as much as we can.
+ rtc::VideoSinkWants last_wants;
+ do {
+ last_wants = video_source_.sink_wants();
+
+ // Insert frames to get a new fps estimate...
+ for (int j = 0; j < kFramerateFps; ++j) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (video_source_.last_sent_width()) {
+ sink_.WaitForEncodedFrame(timestamp_ms);
+ }
+ timestamp_ms += kFrameIntervalMs;
+ AdvanceTime(TimeDelta::Millis(kFrameIntervalMs));
+ }
+ // ...and then try to adapt again.
+ video_stream_encoder_->TriggerCpuOveruse();
+ } while (video_source_.sink_wants().max_framerate_fps <
+ last_wants.max_framerate_fps);
+
+ EXPECT_THAT(video_source_.sink_wants(),
+ FpsMatchesResolutionMax(Eq(kMinFramerateFps)));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionAndFramerateForLowQuality_BalancedMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int64_t kFrameIntervalMs = 150;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (960x540@30fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (640x360@30fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (640x360@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (480x270@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Restrict bitrate, trigger adapt down, expect reduced fps (480x270@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (320x180@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (320x180@7fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ rtc::VideoSinkWants last_wants = source.sink_wants();
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, min resolution reached, expect no change.
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (320x180@10fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(8, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled resolution (480x270@10fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(9, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Increase bitrate, trigger adapt up, expect increased fps (480x270@15fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(10, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled resolution (640x360@15fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(11, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMax());
+ EXPECT_EQ(source.sink_wants().max_pixel_count,
+ source.last_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(12, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled resolution (960x540@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(13, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction (1280x720@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
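+
+// The BALANCED ladder walked by the test above alternates resolution and fps
+// steps. For a 1280x720@30fps input the downward path is:
+//   960x540@30 -> 640x360@30 -> 640x360@15 -> 480x270@15 -> 480x270@10
+//   -> 320x180@10 -> 320x180@7 (floor),
+// and each TriggerQualityHigh() retraces exactly one step of it in reverse.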
+
+TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int64_t kFrameIntervalMs = 150;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (960x540@30fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (640x360@30fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect reduced fps (640x360@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect no change since QP is most limited.
+ {
+ // Store the current sink wants since we expect no change; if nothing
+ // changes, last_wants() is not updated.
+ auto previous_sink_wants = source.sink_wants();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants));
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ }
+
+ // Trigger quality adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up and cpu adapt up since both are most limited,
+ // expect increased resolution (960x540@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up and cpu adapt up since both are most limited,
+ // expect no restriction (1280x720@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptWithTwoReasonsAndDifferentOrder_Resolution) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ const int kFpsLimit = 15;
+ const int64_t kFrameIntervalMs = 150;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Enable BALANCED preference, no initial limitation.
+ AdaptingFrameForwarder source(&time_controller_);
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down framerate (640x360@15fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect scaled down resolution (480x270@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect no change because quality is most limited.
+ {
+ // Store the current sink wants since we expect no change; if nothing
+ // changes, last_wants() is not updated.
+ auto previous_sink_wants = source.sink_wants();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants));
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ }
+
+ // Trigger quality adapt up, expect upscaled resolution (640x360@15fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality and cpu adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
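+
+// The "most limited" rule behind the two tests above: an adapt-up request is
+// honored only for the resource currently imposing the tightest restriction.
+// CPU underuse cannot relax anything while quality (QP) is the most limited
+// reason; once both reasons are equally limited, one trigger of each is
+// needed to step back up together.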
+
+TEST_F(VideoStreamEncoderTest, AcceptsFullHdAdaptedDownSimulcastFrames) {
+ const int kFrameWidth = 1920;
+ const int kFrameHeight = 1080;
+ // 2/3 of 1920.
+ const int kAdaptedFrameWidth = 1280;
+ // 2/3 of 1080.
+ const int kAdaptedFrameHeight = 720;
+ const int kFramerate = 24;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ // Trigger reconfigure encoder (without resetting the entire instance).
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.simulcast_layers[0].max_framerate = kFramerate;
+ video_encoder_config.max_bitrate_bps = kTargetBitrate.bps();
+ video_encoder_config.video_stream_factory =
+ rtc::make_ref_counted<CroppingVideoStreamFactory>();
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_source_.set_adaptation_enabled(true);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kFrameWidth, kFrameHeight);
+
+ // Trigger CPU overuse, downscale by 3/4.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kAdaptedFrameWidth, kAdaptedFrameHeight);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, PeriodicallyUpdatesChannelParameters) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kLowFps = 2;
+ const int kHighFps = 30;
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ int64_t timestamp_ms = CurrentTimeMs();
+ max_framerate_ = kLowFps;
+
+ // Insert 2 seconds of 2fps video.
+ for (int i = 0; i < kLowFps * 2; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ timestamp_ms += 1000 / kLowFps;
+ }
+
+ // Make sure encoder is updated with new target.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ timestamp_ms += 1000 / kLowFps;
+
+ EXPECT_EQ(kLowFps, fake_encoder_.GetConfiguredInputFramerate());
+
+ // Insert 30fps frames for just a little more than the forced update period.
+ const int kVcmTimerIntervalFrames = (kProcessIntervalMs * kHighFps) / 1000;
+ constexpr TimeDelta kFrameInterval = TimeDelta::Seconds(1) / kHighFps;
+ max_framerate_ = kHighFps;
+ for (int i = 0; i < kVcmTimerIntervalFrames + 2; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ // Wait for encoded frame, but skip ahead if it doesn't arrive as it might
+ // be dropped if the encoder hasn't been updated with the new higher target
+ // framerate yet, causing it to overshoot the target bitrate and then
+ // suffer the wrath of the media optimizer.
+ TimedWaitForEncodedFrame(timestamp_ms, 2 * kFrameInterval);
+ timestamp_ms += kFrameInterval.ms();
+ }
+
+ // Don't expect a correct measurement just yet, but it should be higher
+ // than before.
+ EXPECT_GT(fake_encoder_.GetConfiguredInputFramerate(), kLowFps);
+
+ video_stream_encoder_->Stop();
+}
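+
+// About the loop bound above: (kProcessIntervalMs * kHighFps) / 1000 is the
+// number of frames in one forced channel-parameter update period at kHighFps;
+// the extra 2 frames are slack so that at least one periodic update is
+// guaranteed to land inside the loop.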
+
+TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ ResetEncoder("FAKE", 1, 1, 1, false,
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Insert a first video frame; this causes another bitrate update.
+ int64_t timestamp_ms = CurrentTimeMs();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1);
+
+ // Next, simulate video suspension due to pacer queue overrun.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::Zero(), DataRate::Zero(), DataRate::Zero(), 0, 1, 0);
+
+ // Skip ahead until a new periodic parameter update should have occurred.
+ timestamp_ms += kProcessIntervalMs;
+ AdvanceTime(TimeDelta::Millis(kProcessIntervalMs));
+
+ // No more allocations have been made.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ ExpectDroppedFrame();
+ EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ DefaultCpuAdaptationThresholdsForSoftwareEncoder) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const test::ScopedKeyValueConfig kFieldTrials;
+ const CpuOveruseOptions default_options(kFieldTrials);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .low_encode_usage_threshold_percent,
+ default_options.low_encode_usage_threshold_percent);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .high_encode_usage_threshold_percent,
+ default_options.high_encode_usage_threshold_percent);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ HigherCpuAdaptationThresholdsForHardwareEncoder) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const test::ScopedKeyValueConfig kFieldTrials;
+ CpuOveruseOptions hardware_options(kFieldTrials);
+ hardware_options.low_encode_usage_threshold_percent = 150;
+ hardware_options.high_encode_usage_threshold_percent = 200;
+ fake_encoder_.SetIsHardwareAccelerated(true);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .low_encode_usage_threshold_percent,
+ hardware_options.low_encode_usage_threshold_percent);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .high_encode_usage_threshold_percent,
+ hardware_options.high_encode_usage_threshold_percent);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ CpuAdaptationThresholdsUpdatesWhenHardwareAccelerationChange) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ const test::ScopedKeyValueConfig kFieldTrials;
+ const CpuOveruseOptions default_options(kFieldTrials);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .low_encode_usage_threshold_percent,
+ default_options.low_encode_usage_threshold_percent);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .high_encode_usage_threshold_percent,
+ default_options.high_encode_usage_threshold_percent);
+
+ CpuOveruseOptions hardware_options(kFieldTrials);
+ hardware_options.low_encode_usage_threshold_percent = 150;
+ hardware_options.high_encode_usage_threshold_percent = 200;
+ fake_encoder_.SetIsHardwareAccelerated(true);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(2);
+
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .low_encode_usage_threshold_percent,
+ hardware_options.low_encode_usage_threshold_percent);
+ EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions()
+ .high_encode_usage_threshold_percent,
+ hardware_options.high_encode_usage_threshold_percent);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) {
+ const int kFrameWidth = 320;
+ const int kFrameHeight = 240;
+ const int kFps = 30;
+ const DataRate kTargetBitrate = DataRate::KilobitsPerSec(120);
+ const int kNumFramesInRun = kFps * 5; // Runs of five seconds.
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ int64_t timestamp_ms = CurrentTimeMs();
+ max_framerate_ = kFps;
+
+  // Insert 5 seconds of video, verify number of drops with normal bitrate.
+ fake_encoder_.SimulateOvershoot(1.0);
+ int num_dropped = 0;
+ for (int i = 0; i < kNumFramesInRun; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ // Wait up to two frame durations for a frame to arrive.
+ if (!TimedWaitForEncodedFrame(timestamp_ms,
+ 2 * TimeDelta::Seconds(1) / kFps)) {
+ ++num_dropped;
+ }
+ timestamp_ms += 1000 / kFps;
+ }
+
+ // Framerate should be measured to be near the expected target rate.
+ EXPECT_NEAR(fake_encoder_.GetLastFramerate(), kFps, 1);
+
+ // Frame drops should be within 5% of expected 0%.
+ EXPECT_NEAR(num_dropped, 0, 5 * kNumFramesInRun / 100);
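+  // (With kNumFramesInRun = 150, the tolerance 5 * kNumFramesInRun / 100
+  // evaluates to 7 frames.)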
+
+  // Make the encoder produce frames at double the expected bitrate during 5
+  // seconds of video, and verify the number of drops. The rate needs to be
+  // slightly changed in order to force a rate reconfiguration.
+ double overshoot_factor = 2.0;
+ const RateControlSettings trials =
+ RateControlSettings::ParseFromFieldTrials();
+ if (trials.UseEncoderBitrateAdjuster()) {
+    // With the bitrate adjuster, we need to overshoot even more to trigger
+    // frame dropping, since the adjuster will try to just lower the target
+    // bitrate rather than drop frames. If network headroom can be used, it
+    // doesn't push back as hard, so we don't need quite as much overshoot.
+    // These numbers are unfortunately a bit magical, but there's no trivial
+    // way to algebraically infer them.
+ overshoot_factor = 3.0;
+ }
+ fake_encoder_.SimulateOvershoot(overshoot_factor);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate + DataRate::KilobitsPerSec(1),
+ kTargetBitrate + DataRate::KilobitsPerSec(1),
+ kTargetBitrate + DataRate::KilobitsPerSec(1), 0, 0, 0);
+ num_dropped = 0;
+ for (int i = 0; i < kNumFramesInRun; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ // Wait up to two frame durations for a frame to arrive.
+ if (!TimedWaitForEncodedFrame(timestamp_ms,
+ 2 * TimeDelta::Seconds(1) / kFps)) {
+ ++num_dropped;
+ }
+ timestamp_ms += 1000 / kFps;
+ }
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+  // The target framerate should still be near the expected target, despite
+  // the frame drops.
+ EXPECT_NEAR(fake_encoder_.GetLastFramerate(), kFps, 1);
+
+ // Frame drops should be within 5% of expected 50%.
+ EXPECT_NEAR(num_dropped, kNumFramesInRun / 2, 5 * kNumFramesInRun / 100);
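+  // (I.e. roughly 75 +/- 7 dropped frames out of the 150 in the run.)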
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, ConfiguresCorrectFrameRate) {
+ const int kFrameWidth = 320;
+ const int kFrameHeight = 240;
+ const int kActualInputFps = 24;
+ const DataRate kTargetBitrate = DataRate::KilobitsPerSec(120);
+
+ ASSERT_GT(max_framerate_, kActualInputFps);
+
+ int64_t timestamp_ms = CurrentTimeMs();
+ max_framerate_ = kActualInputFps;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Insert 3 seconds of video, with an input fps lower than configured max.
+ for (int i = 0; i < kActualInputFps * 3; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    // Wait for the frame to arrive.
+ WaitForEncodedFrame(timestamp_ms);
+ timestamp_ms += 1000 / kActualInputFps;
+ }
+
+ EXPECT_NEAR(kActualInputFps, fake_encoder_.GetLastFramerate(), 1);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, AccumulatesUpdateRectOnDroppedFrames) {
+ VideoFrame::UpdateRect rect;
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ source.IncomingCapturedFrame(CreateFrameWithUpdatedPixel(1, nullptr, 0));
+ WaitForEncodedFrame(1);
+  // On the very first frame, a full update should be forced.
+ rect = fake_encoder_.GetLastUpdateRect();
+ EXPECT_EQ(rect.offset_x, 0);
+ EXPECT_EQ(rect.offset_y, 0);
+ EXPECT_EQ(rect.height, codec_height_);
+ EXPECT_EQ(rect.width, codec_width_);
+  // The frame with NTP timestamp 2 will be dropped, since frame 3 arrives
+  // while frame 2 is still pending on the encoder queue.
+ source.IncomingCapturedFrame(CreateFrameWithUpdatedPixel(2, nullptr, 1));
+ source.IncomingCapturedFrame(CreateFrameWithUpdatedPixel(3, nullptr, 10));
+ WaitForEncodedFrame(3);
+ // Updates to pixels 1 and 10 should be accumulated to one 10x1 rect.
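+  // (The union of the 1x1 updates at x=1 and x=10 spans columns 1..10, i.e.
+  // offset_x = 1 and width = 10.)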
+ rect = fake_encoder_.GetLastUpdateRect();
+ EXPECT_EQ(rect.offset_x, 1);
+ EXPECT_EQ(rect.offset_y, 0);
+ EXPECT_EQ(rect.width, 10);
+ EXPECT_EQ(rect.height, 1);
+
+ source.IncomingCapturedFrame(CreateFrameWithUpdatedPixel(4, nullptr, 0));
+ WaitForEncodedFrame(4);
+ // Previous frame was encoded, so no accumulation should happen.
+ rect = fake_encoder_.GetLastUpdateRect();
+ EXPECT_EQ(rect.offset_x, 0);
+ EXPECT_EQ(rect.offset_y, 0);
+ EXPECT_EQ(rect.width, 1);
+ EXPECT_EQ(rect.height, 1);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SetsFrameTypes) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // First frame is always keyframe.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_THAT(
+ fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameKey}));
+
+ // Insert delta frame.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_THAT(
+ fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameDelta}));
+
+ // Request next frame be a key-frame.
+ video_stream_encoder_->SendKeyFrame();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+ WaitForEncodedFrame(3);
+ EXPECT_THAT(
+ fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameKey}));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SetsFrameTypesSimulcast) {
+ // Setup simulcast with three streams.
+ ResetEncoder("VP8", 3, 1, 1, false);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+ // Wait for all three layers before triggering event.
+ sink_.SetNumExpectedLayers(3);
+
+ // First frame is always keyframe.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey}));
+
+ // Insert delta frame.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameDelta,
+ VideoFrameType::kVideoFrameDelta,
+ VideoFrameType::kVideoFrameDelta}));
+
+ // Request next frame be a key-frame.
+ // Only first stream is configured to produce key-frame.
+ video_stream_encoder_->SendKeyFrame();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+ WaitForEncodedFrame(3);
+
+ // TODO(webrtc:10615): Map keyframe request to spatial layer. Currently
+ // keyframe request on any layer triggers keyframe on all layers.
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey}));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DoesNotRewriteH264BitstreamWithOptimalSps) {
+  // The SPS contains a VUI with restrictions on the maximum number of
+  // reordered pictures, so there is no need to rewrite the bitstream to
+  // enable faster decoding.
+ ResetEncoder("H264", 1, 1, 1, false);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ fake_encoder_.SetEncodedImageData(
+ EncodedImageBuffer::Create(kOptimalSps, sizeof(kOptimalSps)));
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ EXPECT_THAT(sink_.GetLastEncodedImageData(),
+ testing::ElementsAreArray(kOptimalSps));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, RewritesH264BitstreamWithNonOptimalSps) {
+  // The SPS does not contain a VUI, so the bitstream will be rewritten with
+  // an added VUI that restricts the maximum number of reordered pictures, to
+  // enable faster decoding.
+ uint8_t original_sps[] = {0, 0, 0, 1, H264::NaluType::kSps,
+ 0x00, 0x00, 0x03, 0x03, 0xF4,
+ 0x05, 0x03, 0xC7, 0xC0};
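+  // The bytes above form a raw SPS NALU: a 4-byte start code, the SPS NALU
+  // type, then a payload that, per the comment above, lacks VUI parameters.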
+ ResetEncoder("H264", 1, 1, 1, false);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ fake_encoder_.SetEncodedImageData(
+ EncodedImageBuffer::Create(original_sps, sizeof(original_sps)));
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ EXPECT_THAT(sink_.GetLastEncodedImageData(),
+ testing::ElementsAreArray(kOptimalSps));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, CopiesVideoFrameMetadataAfterDownscale) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const DataRate kTargetBitrate =
+ DataRate::KilobitsPerSec(300); // Too low for HD resolution.
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  // Insert a first video frame. It should be dropped because the resolution
+  // needs to be downscaled.
+ int64_t timestamp_ms = CurrentTimeMs();
+ VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight);
+ frame.set_rotation(kVideoRotation_270);
+ video_source_.IncomingCapturedFrame(frame);
+
+ ExpectDroppedFrame();
+
+ // Second frame is downscaled.
+ timestamp_ms = CurrentTimeMs();
+ frame = CreateFrame(timestamp_ms, kFrameWidth / 2, kFrameHeight / 2);
+ frame.set_rotation(kVideoRotation_90);
+ video_source_.IncomingCapturedFrame(frame);
+
+ WaitForEncodedFrame(timestamp_ms);
+ sink_.CheckLastFrameRotationMatches(kVideoRotation_90);
+
+ // Insert another frame, also downscaled.
+ timestamp_ms = CurrentTimeMs();
+ frame = CreateFrame(timestamp_ms, kFrameWidth / 2, kFrameHeight / 2);
+ frame.set_rotation(kVideoRotation_180);
+ video_source_.IncomingCapturedFrame(frame);
+
+ WaitForEncodedFrame(timestamp_ms);
+ sink_.CheckLastFrameRotationMatches(kVideoRotation_180);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, BandwidthAllocationLowerBound) {
+ const int kFrameWidth = 320;
+ const int kFrameHeight = 180;
+
+ // Initial rate.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(300),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(300),
+ /*link_allocation=*/DataRate::KilobitsPerSec(300),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ // Insert a first video frame so that encoder gets configured.
+ int64_t timestamp_ms = CurrentTimeMs();
+ VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight);
+ frame.set_rotation(kVideoRotation_270);
+ video_source_.IncomingCapturedFrame(frame);
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Set a target rate below the minimum allowed by the codec settings.
+ VideoCodec codec_config = fake_encoder_.config();
+ DataRate min_rate = DataRate::KilobitsPerSec(codec_config.minBitrate);
+ DataRate target_rate = min_rate - DataRate::KilobitsPerSec(1);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/target_rate,
+ /*stable_target_bitrate=*/target_rate,
+ /*link_allocation=*/target_rate,
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Target bitrate and bandwidth allocation should both be capped at min_rate.
+ auto rate_settings = fake_encoder_.GetAndResetLastRateControlSettings();
+ ASSERT_TRUE(rate_settings.has_value());
+ DataRate allocation_sum =
+ DataRate::BitsPerSec(rate_settings->bitrate.get_sum_bps());
+ EXPECT_EQ(min_rate, allocation_sum);
+ EXPECT_EQ(rate_settings->bandwidth_allocation, min_rate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderRatesPropagatedOnReconfigure) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ int64_t timestamp_ms = CurrentTimeMs();
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr));
+ WaitForEncodedFrame(1);
+
+ auto prev_rate_settings = fake_encoder_.GetAndResetLastRateControlSettings();
+ ASSERT_TRUE(prev_rate_settings.has_value());
+ EXPECT_EQ(static_cast<int>(prev_rate_settings->framerate_fps),
+ kDefaultFramerate);
+
+  // Send 2 seconds of video to ensure the framerate is stable at
+  // kDefaultFramerate.
+ for (int i = 0; i < 2 * kDefaultFramerate; i++) {
+ timestamp_ms += 1000 / kDefaultFramerate;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr));
+ WaitForEncodedFrame(timestamp_ms);
+ }
+ EXPECT_EQ(static_cast<int>(fake_encoder_.GetLastFramerate()),
+ kDefaultFramerate);
+ // Capture larger frame to trigger a reconfigure.
+ codec_height_ *= 2;
+ codec_width_ *= 2;
+ timestamp_ms += 1000 / kDefaultFramerate;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr));
+ WaitForEncodedFrame(timestamp_ms);
+
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+ auto current_rate_settings =
+ fake_encoder_.GetAndResetLastRateControlSettings();
+  // Ensure we have actually reconfigured twice: the rate settings should
+  // have been set again even though they haven't changed.
+ ASSERT_TRUE(current_rate_settings.has_value());
+ EXPECT_EQ(prev_rate_settings, current_rate_settings);
+
+ video_stream_encoder_->Stop();
+}
+
+struct MockEncoderSwitchRequestCallback : public EncoderSwitchRequestCallback {
+ MOCK_METHOD(void, RequestEncoderFallback, (), (override));
+ MOCK_METHOD(void,
+ RequestEncoderSwitch,
+ (const webrtc::SdpVideoFormat& format,
+ bool allow_default_fallback),
+ (override));
+};
+
+TEST_F(VideoStreamEncoderTest, EncoderSelectorCurrentEncoderIsSignaled) {
+ constexpr int kDontCare = 100;
+ StrictMock<MockEncoderSelector> encoder_selector;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &fake_encoder_, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ EXPECT_CALL(encoder_selector, OnCurrentEncoder);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(kDontCare, kDontCare, kDontCare));
+ AdvanceTime(TimeDelta::Zero());
+ video_stream_encoder_->Stop();
+
+  // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+  // to their factory, so in order for the encoder instance in the
+  // `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
+  // reset the `video_stream_encoder_` here.
+ video_stream_encoder_.reset();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderSelectorBitrateSwitch) {
+ constexpr int kDontCare = 100;
+
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &fake_encoder_, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ ON_CALL(encoder_selector, OnAvailableBitrate)
+ .WillByDefault(Return(SdpVideoFormat("AV1")));
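+  // The mocked selector suggests "AV1" whenever it is queried about the
+  // available bitrate, so the rate update below should trigger a switch
+  // request.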
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Field(&SdpVideoFormat::name, "AV1"),
+ /*allow_default_fallback=*/false));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(50),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(kDontCare),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ AdvanceTime(TimeDelta::Zero());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderSelectorResolutionSwitch) {
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &fake_encoder_, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ EXPECT_CALL(encoder_selector, OnResolutionChange(RenderResolution(640, 480)))
+ .WillOnce(Return(absl::nullopt));
+ EXPECT_CALL(encoder_selector, OnResolutionChange(RenderResolution(320, 240)))
+ .WillOnce(Return(SdpVideoFormat("AV1")));
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Field(&SdpVideoFormat::name, "AV1"),
+ /*allow_default_fallback=*/false));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(800),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(1000),
+ /*link_allocation=*/DataRate::KilobitsPerSec(1000),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 640, 480));
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 480));
+ video_source_.IncomingCapturedFrame(CreateFrame(3, 320, 240));
+
+ AdvanceTime(TimeDelta::Zero());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderSelectorBrokenEncoderSwitch) {
+ constexpr int kSufficientBitrateToNotDrop = 1000;
+ constexpr int kDontCare = 100;
+
+ NiceMock<MockVideoEncoder> video_encoder;
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &video_encoder, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+  // The VideoStreamEncoder needs some bitrate before it can start encoding.
+  // Set some bitrate so that subsequent calls to WaitForEncodedFrame do not
+  // fail.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*stable_target_bitrate=*/
+ DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ ON_CALL(video_encoder, Encode)
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ENCODER_FAILURE));
+ ON_CALL(encoder_selector, OnEncoderBroken)
+ .WillByDefault(Return(SdpVideoFormat("AV2")));
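+  // Every Encode() call fails, which should make the encoder be reported as
+  // broken and the selector consulted; here it suggests the placeholder
+  // "AV2" format.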
+
+ rtc::Event encode_attempted;
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Field(&SdpVideoFormat::name, "AV2"),
+ /*allow_default_fallback=*/true))
+ .WillOnce([&encode_attempted]() { encode_attempted.Set(); });
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kDontCare, kDontCare));
+ encode_attempted.Wait(TimeDelta::Seconds(3));
+
+ AdvanceTime(TimeDelta::Zero());
+
+ video_stream_encoder_->Stop();
+
+  // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+  // to their factory, so in order for the encoder instance in the
+  // `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
+  // reset the `video_stream_encoder_` here.
+ video_stream_encoder_.reset();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchEncoderOnInitFailureWithEncoderSelector) {
+ NiceMock<MockVideoEncoder> video_encoder;
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &video_encoder, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ ASSERT_EQ(0, sink_.number_of_reconfigurations());
+
+ ON_CALL(video_encoder, InitEncode(_, _))
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ENCODER_FAILURE));
+ ON_CALL(encoder_selector, OnEncoderBroken)
+ .WillByDefault(Return(SdpVideoFormat("AV2")));
+
+ rtc::Event encode_attempted;
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Field(&SdpVideoFormat::name, "AV2"),
+ /*allow_default_fallback=*/true))
+ .WillOnce([&encode_attempted]() { encode_attempted.Set(); });
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ encode_attempted.Wait(TimeDelta::Seconds(3));
+
+ AdvanceTime(TimeDelta::Zero());
+
+ video_stream_encoder_->Stop();
+
+  // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+  // to their factory, so in order for the encoder instance in the
+  // `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
+  // reset the `video_stream_encoder_` here.
+ video_stream_encoder_.reset();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ SwitchEncoderOnInitFailureWithoutEncoderSelector) {
+ NiceMock<MockVideoEncoder> video_encoder;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &video_encoder, /*encoder_selector=*/nullptr);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ ASSERT_EQ(0, sink_.number_of_reconfigurations());
+
+ ON_CALL(video_encoder, InitEncode(_, _))
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ENCODER_FAILURE));
+
+ rtc::Event encode_attempted;
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Field(&SdpVideoFormat::name, "VP8"),
+ /*allow_default_fallback=*/true))
+ .WillOnce([&encode_attempted]() { encode_attempted.Set(); });
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ encode_attempted.Wait(TimeDelta::Seconds(3));
+
+ AdvanceTime(TimeDelta::Zero());
+
+ video_stream_encoder_->Stop();
+
+  // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+  // to their factory, so in order for the encoder instance in the
+  // `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
+  // reset the `video_stream_encoder_` here.
+ video_stream_encoder_.reset();
+}
+
+TEST_F(VideoStreamEncoderTest, NullEncoderReturnSwitch) {
+  // A variant of EncoderSelectorBrokenEncoderSwitch: when a null VideoEncoder
+  // is returned by the encoder_factory, check that the codec switch occurs
+  // without a crash.
+ constexpr int kSufficientBitrateToNotDrop = 1000;
+ constexpr int kDontCare = 100;
+
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory =
+ std::make_unique<test::VideoEncoderNullableProxyFactory>(
+ /*encoder=*/nullptr, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+  // The VideoStreamEncoder needs some bitrate before it can start encoding.
+  // Set some bitrate so that subsequent calls to WaitForEncodedFrame do not
+  // fail.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*stable_target_bitrate=*/
+ DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ ON_CALL(encoder_selector, OnEncoderBroken)
+ .WillByDefault(Return(SdpVideoFormat("AV2")));
+ rtc::Event encode_attempted;
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Field(&SdpVideoFormat::name, "AV2"),
+ /*allow_default_fallback=*/_))
+ .WillOnce([&encode_attempted]() { encode_attempted.Set(); });
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kDontCare, kDontCare));
+ encode_attempted.Wait(TimeDelta::Seconds(3));
+
+ AdvanceTime(TimeDelta::Zero());
+
+ video_stream_encoder_->Stop();
+
+  // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+  // to their factory, so in order for the encoder instance in the
+  // `video_stream_encoder_` to be destroyed before the `encoder_factory`, we
+  // reset the `video_stream_encoder_` here.
+ video_stream_encoder_.reset();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AllocationPropagatedToEncoderWhenTargetRateChanged) {
+ const int kFrameWidth = 320;
+ const int kFrameHeight = 180;
+
+ // Set initial rate.
+ auto rate = DataRate::KilobitsPerSec(100);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/rate,
+ /*stable_target_bitrate=*/rate,
+ /*link_allocation=*/rate,
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ // Insert a first video frame so that encoder gets configured.
+ int64_t timestamp_ms = CurrentTimeMs();
+ VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight);
+ frame.set_rotation(kVideoRotation_270);
+ video_source_.IncomingCapturedFrame(frame);
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(1, fake_encoder_.GetNumSetRates());
+
+ // Change of target bitrate propagates to the encoder.
+ auto new_stable_rate = rate - DataRate::KilobitsPerSec(5);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/new_stable_rate,
+ /*stable_target_bitrate=*/new_stable_rate,
+ /*link_allocation=*/rate,
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(2, fake_encoder_.GetNumSetRates());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AllocationNotPropagatedToEncoderWhenTargetRateUnchanged) {
+ const int kFrameWidth = 320;
+ const int kFrameHeight = 180;
+
+ // Set initial rate.
+ auto rate = DataRate::KilobitsPerSec(100);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/rate,
+ /*stable_target_bitrate=*/rate,
+ /*link_allocation=*/rate,
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ // Insert a first video frame so that encoder gets configured.
+ int64_t timestamp_ms = CurrentTimeMs();
+ VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight);
+ frame.set_rotation(kVideoRotation_270);
+ video_source_.IncomingCapturedFrame(frame);
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(1, fake_encoder_.GetNumSetRates());
+
+  // Lower the stable target rate without changing the target rate or the
+  // link allocation. This should not reset the encoder's rates.
+ auto new_stable_rate = rate - DataRate::KilobitsPerSec(5);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ /*target_bitrate=*/rate,
+ /*stable_target_bitrate=*/new_stable_rate,
+ /*link_allocation=*/rate,
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(1, fake_encoder_.GetNumSetRates());
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-AutomaticAnimationDetectionScreenshare/"
+ "enabled:true,min_fps:20,min_duration_ms:1000,min_area_ratio:0.8/");
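+  // With these trial settings, animation is presumably declared after at
+  // least 1000 ms of >= 20 fps frames whose update region covers >= 80% of
+  // the frame area.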
+ const int kFramerateFps = 30;
+ const int kWidth = 1920;
+ const int kHeight = 1080;
+  const int kNumFrames = 2 * kFramerateFps;  // >1 second of frames.
+  // Works in screenshare mode.
+ ResetEncoder("VP8", 1, 1, 1, /*screenshare*/ true);
+ // We rely on the automatic resolution adaptation, but we handle framerate
+ // adaptation manually by mocking the stats proxy.
+ video_source_.set_adaptation_enabled(true);
+
+ // BALANCED degradation preference is required for this feature.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->SetSource(&video_source_,
+ webrtc::DegradationPreference::BALANCED);
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
+
+ VideoFrame frame = CreateFrame(1, kWidth, kHeight);
+ frame.set_update_rect(VideoFrame::UpdateRect{0, 0, kWidth, kHeight});
+
+ // Pass enough frames with the full update to trigger animation detection.
+ for (int i = 0; i < kNumFrames; ++i) {
+ int64_t timestamp_ms = CurrentTimeMs();
+ frame.set_ntp_time_ms(timestamp_ms);
+ frame.set_timestamp_us(timestamp_ms * 1000);
+ video_source_.IncomingCapturedFrame(frame);
+ WaitForEncodedFrame(timestamp_ms);
+ }
+
+ // Resolution should be limited.
+ rtc::VideoSinkWants expected;
+ expected.max_framerate_fps = kFramerateFps;
+ expected.max_pixel_count = 1280 * 720 + 1;
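+  // Presumably one more than 720p's pixel count, so that 720p itself still
+  // passes a strict less-than comparison while anything larger does not.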
+ EXPECT_THAT(video_source_.sink_wants(), FpsEqResolutionLt(expected));
+
+ // Pass one frame with no known update.
+ // Resolution cap should be removed immediately.
+ int64_t timestamp_ms = CurrentTimeMs();
+ frame.set_ntp_time_ms(timestamp_ms);
+ frame.set_timestamp_us(timestamp_ms * 1000);
+ frame.clear_update_rect();
+
+ video_source_.IncomingCapturedFrame(frame);
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Resolution should be unlimited now.
+ EXPECT_THAT(video_source_.sink_wants(),
+ FpsMatchesResolutionMax(Eq(kFramerateFps)));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, ConfiguresVp9SvcAtOddResolutions) {
+ const int kWidth = 720; // 540p adapted down.
+ const int kHeight = 405;
+ const int kNumFrames = 3;
+  // Works in screenshare mode.
+ ResetEncoder("VP9", /*num_streams=*/1, /*num_temporal_layers=*/1,
+ /*num_spatial_layers=*/2, /*screenshare=*/true);
+
+ video_source_.set_adaptation_enabled(true);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ VideoFrame frame = CreateFrame(1, kWidth, kHeight);
+
+  // Pass a few frames through to make sure the encoder is configured and
+  // encodes at the odd resolution.
+ for (int i = 0; i < kNumFrames; ++i) {
+ int64_t timestamp_ms = CurrentTimeMs();
+ frame.set_ntp_time_ms(timestamp_ms);
+ frame.set_timestamp_us(timestamp_ms * 1000);
+ video_source_.IncomingCapturedFrame(frame);
+ WaitForEncodedFrame(timestamp_ms);
+ }
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderResetAccordingToParameterChange) {
+ const float downscale_factors[] = {4.0, 2.0, 1.0};
+ const int number_layers =
+ sizeof(downscale_factors) / sizeof(downscale_factors[0]);
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ test::FillEncoderConfiguration(kVideoCodecVP8, number_layers, &config);
+ for (int i = 0; i < number_layers; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = downscale_factors[i];
+ config.simulcast_layers[i].active = true;
+ }
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+
+ // First initialization.
+ // Encoder should be initialized. Next frame should be key frame.
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ sink_.SetNumExpectedLayers(number_layers);
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(1, fake_encoder_.GetNumInitializations());
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey}));
+
+ // Disable top layer.
+ // Encoder shouldn't be re-initialized. Next frame should be delta frame.
+ config.simulcast_layers[number_layers - 1].active = false;
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ sink_.SetNumExpectedLayers(number_layers - 1);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(1, fake_encoder_.GetNumInitializations());
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameDelta,
+ VideoFrameType::kVideoFrameDelta,
+ VideoFrameType::kVideoFrameDelta}));
+
+ // Re-enable top layer.
+ // Encoder should be re-initialized. Next frame should be key frame.
+ config.simulcast_layers[number_layers - 1].active = true;
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ sink_.SetNumExpectedLayers(number_layers);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(2, fake_encoder_.GetNumInitializations());
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey}));
+
+ // Top layer max rate change.
+ // Encoder shouldn't be re-initialized. Next frame should be delta frame.
+ config.simulcast_layers[number_layers - 1].max_bitrate_bps -= 100;
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ sink_.SetNumExpectedLayers(number_layers);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(2, fake_encoder_.GetNumInitializations());
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameDelta,
+ VideoFrameType::kVideoFrameDelta,
+ VideoFrameType::kVideoFrameDelta}));
+
+ // Top layer resolution change.
+ // Encoder should be re-initialized. Next frame should be key frame.
+ config.simulcast_layers[number_layers - 1].scale_resolution_down_by += 0.1;
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ sink_.SetNumExpectedLayers(number_layers);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(3, fake_encoder_.GetNumInitializations());
+ EXPECT_THAT(fake_encoder_.LastFrameTypes(),
+ ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey,
+ VideoFrameType::kVideoFrameKey}));
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSinglecast) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ SetUp();
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ // Capturing a frame should reconfigure the encoder and expose the encoder
+ // resolution, which is the same as the input frame.
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(video_source_.sink_wants().resolutions,
+ ::testing::ElementsAreArray(
+ {rtc::VideoSinkWants::FrameSize(kFrameWidth, kFrameHeight)}));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSimulcast) {
+  // Pick downscale factors such that we never encode at full resolution -
+  // this is an interesting use case. The frame resolution influences the
+  // encoder resolutions, but if no layer has `scale_resolution_down_by` == 1
+  // then the encoder should not ask for the frame resolution. This allows
+  // video frames to have the appearance of one resolution but optimize their
+  // internal buffers for what is actually encoded.
+ const size_t kNumSimulcastLayers = 3u;
+ const float kDownscaleFactors[] = {8.0, 4.0, 2.0};
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const rtc::VideoSinkWants::FrameSize kLayer0Size(
+ kFrameWidth / kDownscaleFactors[0], kFrameHeight / kDownscaleFactors[0]);
+ const rtc::VideoSinkWants::FrameSize kLayer1Size(
+ kFrameWidth / kDownscaleFactors[1], kFrameHeight / kDownscaleFactors[1]);
+ const rtc::VideoSinkWants::FrameSize kLayer2Size(
+ kFrameWidth / kDownscaleFactors[2], kFrameHeight / kDownscaleFactors[2]);
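+  // With the factors above, the expected layer sizes are 160x90, 320x180 and
+  // 640x360; none of them equals the 1280x720 input.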
+
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ test::FillEncoderConfiguration(kVideoCodecVP8, kNumSimulcastLayers, &config);
+ for (size_t i = 0; i < kNumSimulcastLayers; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = kDownscaleFactors[i];
+ config.simulcast_layers[i].active = true;
+ }
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false, encoder_info);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+
+ // Capture a frame with all layers active.
+ int64_t timestamp_ms = kFrameIntervalMs;
+ sink_.SetNumExpectedLayers(kNumSimulcastLayers);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ // Expect encoded resolutions to match the expected simulcast layers.
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(
+ video_source_.sink_wants().resolutions,
+ ::testing::ElementsAreArray({kLayer0Size, kLayer1Size, kLayer2Size}));
+
+ // Capture a frame with one of the layers inactive.
+ timestamp_ms += kFrameIntervalMs;
+ config.simulcast_layers[2].active = false;
+ sink_.SetNumExpectedLayers(kNumSimulcastLayers - 1);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Expect encoded resolutions to match the expected simulcast layers.
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(video_source_.sink_wants().resolutions,
+ ::testing::ElementsAreArray({kLayer0Size, kLayer1Size}));
+
+ // Capture a frame with all but one layer turned off.
+ timestamp_ms += kFrameIntervalMs;
+ config.simulcast_layers[1].active = false;
+ sink_.SetNumExpectedLayers(kNumSimulcastLayers - 2);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Expect encoded resolutions to match the expected simulcast layers.
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_THAT(video_source_.sink_wants().resolutions,
+ ::testing::ElementsAreArray({kLayer0Size}));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, QpPresent_QpKept) {
+ ResetEncoder("VP8", 1, 1, 1, false);
+
+ // Force encoder reconfig.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, codec_width_, codec_height_));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  // Set QP on the encoded frame and pass the frame to the encode complete
+  // callback. Since QP is present, QP parsing won't be triggered and the
+  // original value should be kept.
+ EncodedImage encoded_image;
+ encoded_image.qp_ = 123;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25)));
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ fake_encoder_.InjectEncodedImage(encoded_image, &codec_info);
+ EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeout));
+ EXPECT_EQ(sink_.GetLastEncodedImage().qp_, 123);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, QpAbsent_QpParsed) {
+ ResetEncoder("VP8", 1, 1, 1, false);
+
+ // Force encoder reconfig.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, codec_width_, codec_height_));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  // Pass an encoded frame without QP to the encode complete callback. QP
+  // should be parsed and set.
+ EncodedImage encoded_image;
+ encoded_image.qp_ = -1;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25)));
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ fake_encoder_.InjectEncodedImage(encoded_image, &codec_info);
+ EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeout));
+ EXPECT_EQ(sink_.GetLastEncodedImage().qp_, 25);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, QpAbsentParsingDisabled_QpAbsent) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-QpParsingKillSwitch/Enabled/");
+
+ ResetEncoder("VP8", 1, 1, 1, false);
+
+ // Force encoder reconfig.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, codec_width_, codec_height_));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EncodedImage encoded_image;
+ encoded_image.qp_ = -1;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(
+ kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25)));
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ fake_encoder_.InjectEncodedImage(encoded_image, &codec_info);
+ EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeout));
+ EXPECT_EQ(sink_.GetLastEncodedImage().qp_, -1);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityScalingNotAllowed_QualityScalingDisabled) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Disable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = false;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, QualityScalingNotAllowed_IsQpTrustedSetTrue) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Set QP trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(true);
+  // Disable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = false;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityScalingNotAllowedAndQPIsTrusted_BandwidthScalerDisable) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Set QP trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(true);
+  // Disable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = false;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityScalingNotAllowedAndQPIsNotTrusted_BandwidthScalerDisable) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+  // Set QP not trusted in encoder info.
+  fake_encoder_.SetIsQpTrusted(false);
+  // Disable quality scaling in encoder config.
+  video_encoder_config.is_quality_scaling_allowed = false;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderProvideLimitsWhenQPIsNotTrusted) {
+  // Set QP not trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(false);
+
+ const int MinEncBitrateKbps = 30;
+ const int MaxEncBitrateKbps = 100;
+ const int MinStartBitrateKbp = 50;
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
+ /*frame_size_pixels=*/codec_width_ * codec_height_,
+ /*min_start_bitrate_bps=*/MinStartBitrateKbp,
+ /*min_bitrate_bps=*/MinEncBitrateKbps * 1000,
+ /*max_bitrate_bps=*/MaxEncBitrateKbps * 1000);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecH264, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = MaxEncBitrateKbps * 1000;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps =
+ MinEncBitrateKbps * 1000;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(
+ MaxEncBitrateKbps,
+ static_cast<int>(bitrate_allocator_factory_.codec_config().maxBitrate));
+ EXPECT_EQ(
+ MinEncBitrateKbps,
+ static_cast<int>(bitrate_allocator_factory_.codec_config().minBitrate));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderDoesnotProvideLimitsWhenQPIsNotTrusted) {
+  // Set QP not trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(false);
+
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> suitable_bitrate_limit =
+ EncoderInfoSettings::
+ GetSinglecastBitrateLimitForResolutionWhenQpIsUntrusted(
+ codec_width_ * codec_height_,
+ EncoderInfoSettings::
+ GetDefaultSinglecastBitrateLimitsWhenQpIsUntrusted());
+ EXPECT_TRUE(suitable_bitrate_limit.has_value());
+
+ const int MaxEncBitrate = suitable_bitrate_limit->max_bitrate_bps;
+ const int MinEncBitrate = suitable_bitrate_limit->min_bitrate_bps;
+ const int TargetEncBitrate = MaxEncBitrate;
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::BitsPerSec(TargetEncBitrate),
+ DataRate::BitsPerSec(TargetEncBitrate),
+ DataRate::BitsPerSec(TargetEncBitrate), 0, 0, 0);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecH264, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = MaxEncBitrate;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps = MinEncBitrate;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(
+ MaxEncBitrate / 1000,
+ static_cast<int>(bitrate_allocator_factory_.codec_config().maxBitrate));
+ EXPECT_EQ(
+ MinEncBitrate / 1000,
+ static_cast<int>(bitrate_allocator_factory_.codec_config().minBitrate));
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NormalComplexityWithMoreThanTwoCores) {
+ ResetEncoder("VP9", /*num_stream=*/1, /*num_temporal_layers=*/1,
+ /*num_spatial_layers=*/1,
+ /*screenshare=*/false, /*allocation_callback_type=*/
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing,
+ /*num_cores=*/3);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, /*width=*/320, /*height=*/180));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(fake_encoder_.LastEncoderComplexity(),
+ VideoCodecComplexity::kComplexityNormal);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NormalComplexityWhenLowTierOptimizationsAreDisabled) {
+ webrtc::test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-VP9-LowTierOptimizations/Disabled/");
+
+ ResetEncoder("VP9", /*num_stream=*/1, /*num_temporal_layers=*/1,
+ /*num_spatial_layers=*/1,
+ /*screenshare=*/false, /*allocation_callback_type=*/
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing,
+ /*num_cores=*/2);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, /*width=*/320, /*height=*/180));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(fake_encoder_.LastEncoderComplexity(),
+ VideoCodecComplexity::kComplexityNormal);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, LowComplexityWithTwoCores) {
+ ResetEncoder("VP9", /*num_stream=*/1, /*num_temporal_layers=*/1,
+ /*num_spatial_layers=*/1,
+ /*screenshare=*/false, /*allocation_callback_type=*/
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocationWhenScreenSharing,
+ /*num_cores=*/2);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, /*width=*/320, /*height=*/180));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(fake_encoder_.LastEncoderComplexity(),
+ VideoCodecComplexity::kComplexityLow);
+ video_stream_encoder_->Stop();
+}
+
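+// Taken together, the three complexity tests above pin down the VP9 low-tier
+// behavior: low complexity is selected only when at most two cores are
+// available, the resolution is small, and the
+// WebRTC-VP9-LowTierOptimizations field trial is not disabled. A sketch of
+// the decision, assuming these are the only inputs (the production logic may
+// consider more):
+//
+//   bool use_low_complexity = num_cores <= 2 &&
+//                             width * height <= 320 * 180 &&
+//                             !low_tier_optimizations_disabled;
+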
+#if !defined(WEBRTC_IOS)
+// TODO(bugs.webrtc.org/12401): Disabled because WebRTC-Video-QualityScaling is
+// disabled by default on iOS.
+TEST_F(VideoStreamEncoderTest, QualityScalingAllowed_QualityScalingEnabled) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Enable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = true;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, QualityScalingAllowed_IsQpTrustedSetTrue) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Set QP trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(true);
+ // Enable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = true;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, QualityScalingAllowed_IsQpTrustedSetFalse) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Set QP not trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(false);
+ // Enable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = true;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ // When the quality_scaler_ is not in use but is_quality_scaling_allowed is
+ // true, the bandwidth_quality_scaler_ takes over, so bw_limited_resolution
+ // is true.
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityScalingAllowedAndQPIsTrusted_BandwidthScalerDisable) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Set QP trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(true);
+ // Enable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = true;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ // bandwidth_quality_scaler isn't working, but quality_scaler is working.
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityScalingAllowedAndQPIsNotTrusted_BandwidthScalerEnabled) {
+ VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy();
+
+ // Disable scaling settings in encoder info.
+ fake_encoder_.SetQualityScaling(false);
+ // Set QP not trusted in encoder info.
+ fake_encoder_.SetIsQpTrusted(false);
+ // Enable quality scaling in encoder config.
+ video_encoder_config.is_quality_scaling_allowed = true;
+ ConfigureEncoder(std::move(video_encoder_config));
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, 1280, 720));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
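+// The preceding quality-scaling tests all exercise the same selection rule:
+// when the encoder reports a trusted QP, the QP-driven quality_scaler_
+// handles resolution adaptation, and when QP is not trusted the
+// bandwidth_quality_scaler_ takes over; either path flags
+// bw_limited_resolution after TriggerQualityLow(). A sketch of the choice,
+// assuming the trust bit is the only input:
+//
+//   if (encoder_info.is_qp_trusted.value_or(true)) {
+//     // Adapt using quality_scaler_ (QP thresholds).
+//   } else {
+//     // Adapt using bandwidth_quality_scaler_ (encoded size vs. target).
+//   }
+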
+TEST_F(VideoStreamEncoderTest,
+ RequestsRefreshFrameAfterEarlyDroppedNativeFrame) {
+ // Send a native frame before encoder rates have been set. The encoder is
+ // seen as paused at this time.
+ rtc::Event frame_destroyed_event;
+ video_source_.IncomingCapturedFrame(CreateFakeNativeFrame(
+ /*ntp_time_ms=*/1, &frame_destroyed_event, codec_width_, codec_height_));
+
+ // Frame should be dropped and destroyed.
+ ExpectDroppedFrame();
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeout));
+ EXPECT_EQ(video_source_.refresh_frames_requested_, 0);
+
+ // Set bitrates, unpausing the encoder and triggering a request for a refresh
+ // frame.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(video_source_.refresh_frames_requested_, 1);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, RecreatesEncoderWhenEnableVp9SpatialLayer) {
+ // Set up encoder to use VP9 SVC using two spatial layers.
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true);
+ fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/1, true);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9,
+ /*num_streams=*/1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = 2 * kTargetBitrate.bps();
+ video_encoder_config.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = 2;
+ vp9_settings.numberOfTemporalLayers = 2;
+ vp9_settings.interLayerPred = InterLayerPredMode::kOn;
+ vp9_settings.automaticResizeOn = false;
+ video_encoder_config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ video_encoder_config.spatial_layers = GetSvcConfig(1280, 720,
+ /*fps=*/30.0,
+ /*first_active_layer=*/0,
+ /*num_spatial_layers=*/2,
+ /*num_temporal_layers=*/3,
+ /*is_screenshare=*/false);
+ ConfigureEncoder(video_encoder_config.Copy(),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoLayersAllocation);
+
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(fake_encoder_.GetNumInitializations(), 1);
+
+ // Turn off the top spatial layer. This does not require an encoder reset.
+ video_encoder_config.spatial_layers[1].active = false;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength, nullptr);
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(33));
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(fake_encoder_.GetNumInitializations(), 1);
+
+ // Turn on the top spatial layer again, this does require an encoder reset.
+ video_encoder_config.spatial_layers[1].active = true;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength, nullptr);
+
+ time_controller_.AdvanceTime(TimeDelta::Millis(33));
+ video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720));
+ WaitForEncodedFrame(CurrentTimeMs());
+ EXPECT_EQ(fake_encoder_.GetNumInitializations(), 2);
+
+ video_stream_encoder_->Stop();
+}
+
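+// The asymmetry in the test above is deliberate: deactivating the top
+// spatial layer is handled through rate allocation alone, while
+// re-activating it forces a new InitEncode() cycle, which is why
+// GetNumInitializations() only increments on the way back up.
+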
+#endif // !defined(WEBRTC_IOS)
+
+// Test parameters: (VideoCodecType codec, bool allow_i420_conversion)
+class VideoStreamEncoderWithRealEncoderTest
+ : public VideoStreamEncoderTest,
+ public ::testing::WithParamInterface<std::pair<VideoCodecType, bool>> {
+ public:
+ VideoStreamEncoderWithRealEncoderTest()
+ : VideoStreamEncoderTest(),
+ codec_type_(std::get<0>(GetParam())),
+ allow_i420_conversion_(std::get<1>(GetParam())) {}
+
+ void SetUp() override {
+ VideoStreamEncoderTest::SetUp();
+ std::unique_ptr<VideoEncoder> encoder;
+ switch (codec_type_) {
+ case kVideoCodecVP8:
+ encoder = VP8Encoder::Create();
+ break;
+ case kVideoCodecVP9:
+ encoder = VP9Encoder::Create();
+ break;
+ case kVideoCodecAV1:
+ encoder = CreateLibaomAv1Encoder();
+ break;
+ case kVideoCodecH264:
+ encoder =
+ H264Encoder::Create(cricket::VideoCodec(cricket::kH264CodecName));
+ break;
+ case kVideoCodecMultiplex:
+ mock_encoder_factory_for_multiplex_ =
+ std::make_unique<MockVideoEncoderFactory>();
+ EXPECT_CALL(*mock_encoder_factory_for_multiplex_, Die);
+ EXPECT_CALL(*mock_encoder_factory_for_multiplex_, CreateVideoEncoder)
+ .WillRepeatedly([] { return VP8Encoder::Create(); });
+ encoder = std::make_unique<MultiplexEncoderAdapter>(
+ mock_encoder_factory_for_multiplex_.get(), SdpVideoFormat("VP8"),
+ false);
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ ConfigureEncoderAndBitrate(codec_type_, std::move(encoder));
+ }
+
+ void TearDown() override {
+ video_stream_encoder_->Stop();
+ // Ensure `video_stream_encoder_` is destroyed before
+ // `encoder_proxy_factory_`.
+ video_stream_encoder_.reset();
+ VideoStreamEncoderTest::TearDown();
+ }
+
+ protected:
+ void ConfigureEncoderAndBitrate(VideoCodecType codec_type,
+ std::unique_ptr<VideoEncoder> encoder) {
+ // Configure VSE to use the encoder.
+ encoder_ = std::move(encoder);
+ encoder_proxy_factory_ = std::make_unique<test::VideoEncoderProxyFactory>(
+ encoder_.get(), &encoder_selector_);
+ video_send_config_.encoder_settings.encoder_factory =
+ encoder_proxy_factory_.get();
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(codec_type, 1, &video_encoder_config);
+ video_encoder_config_ = video_encoder_config.Copy();
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ // Set bitrate to ensure frame is not dropped.
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ }
+
+ const VideoCodecType codec_type_;
+ const bool allow_i420_conversion_;
+ NiceMock<MockEncoderSelector> encoder_selector_;
+ std::unique_ptr<test::VideoEncoderProxyFactory> encoder_proxy_factory_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ std::unique_ptr<MockVideoEncoderFactory> mock_encoder_factory_for_multiplex_;
+};
+
+TEST_P(VideoStreamEncoderWithRealEncoderTest, EncoderMapsNativeI420) {
+ auto native_i420_frame = test::CreateMappableNativeFrame(
+ 1, VideoFrameBuffer::Type::kI420, codec_width_, codec_height_);
+ video_source_.IncomingCapturedFrame(native_i420_frame);
+ WaitForEncodedFrame(codec_width_, codec_height_);
+
+ auto mappable_native_buffer =
+ test::GetMappableNativeBufferFromVideoFrame(native_i420_frame);
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_frame_buffers =
+ mappable_native_buffer->GetMappedFramedBuffers();
+ ASSERT_EQ(mapped_frame_buffers.size(), 1u);
+ EXPECT_EQ(mapped_frame_buffers[0]->width(), codec_width_);
+ EXPECT_EQ(mapped_frame_buffers[0]->height(), codec_height_);
+ EXPECT_EQ(mapped_frame_buffers[0]->type(), VideoFrameBuffer::Type::kI420);
+}
+
+TEST_P(VideoStreamEncoderWithRealEncoderTest, EncoderMapsNativeNV12) {
+ auto native_nv12_frame = test::CreateMappableNativeFrame(
+ 1, VideoFrameBuffer::Type::kNV12, codec_width_, codec_height_);
+ video_source_.IncomingCapturedFrame(native_nv12_frame);
+ WaitForEncodedFrame(codec_width_, codec_height_);
+
+ auto mappable_native_buffer =
+ test::GetMappableNativeBufferFromVideoFrame(native_nv12_frame);
+ std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_frame_buffers =
+ mappable_native_buffer->GetMappedFramedBuffers();
+ ASSERT_EQ(mapped_frame_buffers.size(), 1u);
+ EXPECT_EQ(mapped_frame_buffers[0]->width(), codec_width_);
+ EXPECT_EQ(mapped_frame_buffers[0]->height(), codec_height_);
+ EXPECT_EQ(mapped_frame_buffers[0]->type(), VideoFrameBuffer::Type::kNV12);
+
+ if (!allow_i420_conversion_) {
+ EXPECT_FALSE(mappable_native_buffer->DidConvertToI420());
+ }
+}
+
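+// Both mapping tests rely on the mappable native buffer recording which
+// pixel formats the encoder asked it to map to. The NV12 variant
+// additionally checks DidConvertToI420() so that codecs whose underlying
+// encoders consume NV12 directly (the allow_i420_conversion test parameter
+// set to false) are proven not to fall back to an I420 conversion.
+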
+TEST_P(VideoStreamEncoderWithRealEncoderTest, HandlesLayerToggling) {
+ if (codec_type_ == kVideoCodecMultiplex) {
+ // The multiplex codec wraps mock encoders here; skip it for this test.
+ return;
+ }
+
+ const size_t kNumSpatialLayers = 3u;
+ const float kDownscaleFactors[] = {4.0, 2.0, 1.0};
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const rtc::VideoSinkWants::FrameSize kLayer0Size(
+ kFrameWidth / kDownscaleFactors[0], kFrameHeight / kDownscaleFactors[0]);
+ const rtc::VideoSinkWants::FrameSize kLayer1Size(
+ kFrameWidth / kDownscaleFactors[1], kFrameHeight / kDownscaleFactors[1]);
+ const rtc::VideoSinkWants::FrameSize kLayer2Size(
+ kFrameWidth / kDownscaleFactors[2], kFrameHeight / kDownscaleFactors[2]);
+
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ if (codec_type_ == VideoCodecType::kVideoCodecVP9) {
+ test::FillEncoderConfiguration(codec_type_, 1, &config);
+ config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = kNumSpatialLayers;
+ vp9_settings.numberOfTemporalLayers = 3;
+ vp9_settings.automaticResizeOn = false;
+ config.encoder_specific_settings =
+ rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
+ vp9_settings);
+ config.spatial_layers = GetSvcConfig(kFrameWidth, kFrameHeight,
+ /*fps=*/30.0,
+ /*first_active_layer=*/0,
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*is_screenshare=*/false);
+ } else if (codec_type_ == VideoCodecType::kVideoCodecAV1) {
+ test::FillEncoderConfiguration(codec_type_, 1, &config);
+ config.max_bitrate_bps = kSimulcastTargetBitrate.bps();
+ config.spatial_layers = GetSvcConfig(kFrameWidth, kFrameHeight,
+ /*fps=*/30.0,
+ /*first_active_layer=*/0,
+ /*num_spatial_layers=*/3,
+ /*num_temporal_layers=*/3,
+ /*is_screenshare=*/false);
+ config.simulcast_layers[0].scalability_mode = ScalabilityMode::kL3T3_KEY;
+ } else {
+ // Simulcast for VP8/H264.
+ test::FillEncoderConfiguration(codec_type_, kNumSpatialLayers, &config);
+ for (size_t i = 0; i < kNumSpatialLayers; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by =
+ kDownscaleFactors[i];
+ config.simulcast_layers[i].active = true;
+ }
+ if (codec_type_ == VideoCodecType::kVideoCodecH264) {
+ // Turn off frame dropping to prevent flakiness.
+ config.frame_drop_enabled = false;
+ }
+ }
+
+ auto set_layer_active = [&](int layer_idx, bool active) {
+ if (codec_type_ == VideoCodecType::kVideoCodecVP9 ||
+ codec_type_ == VideoCodecType::kVideoCodecAV1) {
+ config.spatial_layers[layer_idx].active = active;
+ } else {
+ config.simulcast_layers[layer_idx].active = active;
+ }
+ };
+
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ CodecTypeToPayloadString(codec_type_), /*max_qp=*/56,
+ /*is_screenshare=*/false,
+ /*conference_mode=*/false, encoder_info);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kSimulcastTargetBitrate, kSimulcastTargetBitrate, kSimulcastTargetBitrate,
+ 0, 0, 0);
+
+ // Capture a frame with all layers active.
+ sink_.SetNumExpectedLayers(kNumSpatialLayers);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+
+ WaitForEncodedFrame(kLayer2Size.width, kLayer2Size.height);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Capture a frame with one of the layers inactive.
+ set_layer_active(2, false);
+ sink_.SetNumExpectedLayers(kNumSpatialLayers - 1);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kLayer1Size.width, kLayer1Size.height);
+
+ // New target bitrates signaled based on lower resolution.
+ DataRate kTwoLayerBitrate = DataRate::KilobitsPerSec(833);
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ kTwoLayerBitrate, kTwoLayerBitrate, kTwoLayerBitrate, 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Re-enable the top layer.
+ set_layer_active(2, true);
+ sink_.SetNumExpectedLayers(kNumSpatialLayers);
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Bitrate target adjusted back up to enable HD layer...
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ DataRate::KilobitsPerSec(1800), DataRate::KilobitsPerSec(1800),
+ DataRate::KilobitsPerSec(1800), 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // ...then add a new frame.
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kLayer2Size.width, kLayer2Size.height);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_stream_encoder_->Stop();
+}
+
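+// With the downscale factors {4, 2, 1} used above, the per-layer resolutions
+// work out to 320x180, 640x360 and 1280x720. WaitForEncodedFrame keys off
+// the highest active layer, which is why disabling layer 2 makes the test
+// wait for 640x360 instead of 1280x720.
+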
+std::string TestParametersVideoCodecAndAllowI420ConversionToString(
+ testing::TestParamInfo<std::pair<VideoCodecType, bool>> info) {
+ VideoCodecType codec_type = std::get<0>(info.param);
+ bool allow_i420_conversion = std::get<1>(info.param);
+ std::string str;
+ switch (codec_type) {
+ case kVideoCodecGeneric:
+ str = "Generic";
+ break;
+ case kVideoCodecVP8:
+ str = "VP8";
+ break;
+ case kVideoCodecVP9:
+ str = "VP9";
+ break;
+ case kVideoCodecAV1:
+ str = "AV1";
+ break;
+ case kVideoCodecH264:
+ str = "H264";
+ break;
+ case kVideoCodecMultiplex:
+ str = "Multiplex";
+ break;
+ default:
+ RTC_DCHECK_NOTREACHED();
+ }
+ str += allow_i420_conversion ? "_AllowToI420" : "_DisallowToI420";
+ return str;
+}
+
+constexpr std::pair<VideoCodecType, bool> kVP8DisallowConversion =
+ std::make_pair(kVideoCodecVP8, /*allow_i420_conversion=*/false);
+constexpr std::pair<VideoCodecType, bool> kVP9DisallowConversion =
+ std::make_pair(kVideoCodecVP9, /*allow_i420_conversion=*/false);
+constexpr std::pair<VideoCodecType, bool> kAV1DisallowConversion =
+ std::make_pair(kVideoCodecAV1, /*allow_i420_conversion=*/false);
+constexpr std::pair<VideoCodecType, bool> kMultiplexDisallowConversion =
+ std::make_pair(kVideoCodecMultiplex, /*allow_i420_conversion=*/false);
+#if defined(WEBRTC_USE_H264)
+constexpr std::pair<VideoCodecType, bool> kH264AllowConversion =
+ std::make_pair(kVideoCodecH264, /*allow_i420_conversion=*/true);
+
+// The Windows compiler does not tolerate #if statements inside the
+// INSTANTIATE_TEST_SUITE_P() macro, so we have to have two definitions (with
+// and without H264).
+INSTANTIATE_TEST_SUITE_P(
+ All,
+ VideoStreamEncoderWithRealEncoderTest,
+ ::testing::Values(kVP8DisallowConversion,
+ kVP9DisallowConversion,
+ kAV1DisallowConversion,
+ kMultiplexDisallowConversion,
+ kH264AllowConversion),
+ TestParametersVideoCodecAndAllowI420ConversionToString);
+#else
+INSTANTIATE_TEST_SUITE_P(
+ All,
+ VideoStreamEncoderWithRealEncoderTest,
+ ::testing::Values(kVP8DisallowConversion,
+ kVP9DisallowConversion,
+ kAV1DisallowConversion,
+ kMultiplexDisallowConversion),
+ TestParametersVideoCodecAndAllowI420ConversionToString);
+#endif
+
+class ReconfigureEncoderTest : public VideoStreamEncoderTest {
+ protected:
+ void RunTest(const std::vector<VideoStream>& configs,
+ const int expected_num_init_encode) {
+ ConfigureEncoder(configs[0]);
+ OnBitrateUpdated(kTargetBitrate);
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ ExpectEqual(bitrate_allocator_factory_.codec_config(), configs[0]);
+ EXPECT_EQ(1, fake_encoder_.GetNumInitializations());
+ ExpectEqual(fake_encoder_.config(), configs[0]);
+
+ // Reconfigure encoder, the encoder should only be reconfigured if needed.
+ ConfigureEncoder(configs[1]);
+ InsertFrameAndWaitForEncoded();
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+ ExpectEqual(bitrate_allocator_factory_.codec_config(), configs[1]);
+ EXPECT_EQ(expected_num_init_encode, fake_encoder_.GetNumInitializations());
+ if (expected_num_init_encode > 1)
+ ExpectEqual(fake_encoder_.config(), configs[1]);
+
+ video_stream_encoder_->Stop();
+ }
+
+ void ConfigureEncoder(const VideoStream& stream) {
+ VideoEncoderConfig config;
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+
+ test::FillEncoderConfiguration(kVideoCodecVP8, /*num_streams=*/1, &config);
+ config.max_bitrate_bps = stream.max_bitrate_bps;
+ config.simulcast_layers[0] = stream;
+ config.video_stream_factory =
+ rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ /*codec_name=*/"VP8", /*max_qp=*/0, /*is_screenshare=*/false,
+ /*conference_mode=*/false, encoder_info);
+ video_stream_encoder_->ConfigureEncoder(std::move(config),
+ kMaxPayloadLength);
+ }
+
+ void OnBitrateUpdated(DataRate bitrate) {
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+ bitrate, bitrate, bitrate, 0, 0, 0);
+ }
+
+ void InsertFrameAndWaitForEncoded() {
+ timestamp_ms_ += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms_, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(timestamp_ms_);
+ }
+
+ void ExpectEqual(const VideoCodec& actual,
+ const VideoStream& expected) const {
+ EXPECT_EQ(actual.numberOfSimulcastStreams, 1);
+ EXPECT_EQ(actual.simulcastStream[0].maxFramerate, expected.max_framerate);
+ EXPECT_EQ(actual.simulcastStream[0].minBitrate * 1000,
+ static_cast<unsigned int>(expected.min_bitrate_bps));
+ EXPECT_EQ(actual.simulcastStream[0].maxBitrate * 1000,
+ static_cast<unsigned int>(expected.max_bitrate_bps));
+ EXPECT_EQ(actual.simulcastStream[0].width,
+ kWidth / expected.scale_resolution_down_by);
+ EXPECT_EQ(actual.simulcastStream[0].height,
+ kHeight / expected.scale_resolution_down_by);
+ EXPECT_EQ(actual.simulcastStream[0].numberOfTemporalLayers,
+ expected.num_temporal_layers);
+ EXPECT_EQ(actual.GetScalabilityMode(), expected.scalability_mode);
+ }
+
+ VideoStream DefaultConfig() const {
+ VideoStream stream;
+ stream.max_framerate = 25;
+ stream.min_bitrate_bps = 35000;
+ stream.max_bitrate_bps = 900000;
+ stream.scale_resolution_down_by = 1.0;
+ stream.num_temporal_layers = 1;
+ stream.bitrate_priority = 1.0;
+ stream.scalability_mode = absl::nullopt;
+ return stream;
+ }
+
+ const int kWidth = 640;
+ const int kHeight = 360;
+ int64_t timestamp_ms_ = 0;
+};
+
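+// The tests below partition VideoStream fields into those that can be
+// applied without restarting the encoder (max_framerate, min/max bitrate,
+// bitrate_priority) and those that force a fresh InitEncode() (resolution,
+// temporal layer count, scalability mode), expressed through
+// expected_num_init_encode.
+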
+TEST_F(ReconfigureEncoderTest, NotReconfiguredIfMaxFramerateChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.max_framerate++;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/1);
+}
+
+TEST_F(ReconfigureEncoderTest, NotReconfiguredIfMinBitrateChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.min_bitrate_bps += 10000;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/1);
+}
+
+TEST_F(ReconfigureEncoderTest, NotReconfiguredIfMaxBitrateChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.max_bitrate_bps += 100000;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/1);
+}
+
+TEST_F(ReconfigureEncoderTest, NotReconfiguredIfBitratePriorityChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.bitrate_priority = config1.bitrate_priority.value() * 2.0;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/1);
+}
+
+TEST_F(ReconfigureEncoderTest, ReconfiguredIfResolutionChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.scale_resolution_down_by *= 2;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/2);
+}
+
+TEST_F(ReconfigureEncoderTest, ReconfiguredIfNumTemporalLayerChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.num_temporal_layers = config1.num_temporal_layers.value() + 1;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/2);
+}
+
+TEST_F(ReconfigureEncoderTest, ReconfiguredIfScalabilityModeChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.scalability_mode = ScalabilityMode::kL2T1;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/2);
+}
+
+TEST_F(ReconfigureEncoderTest,
+ UpdatesNumTemporalLayersFromScalabilityModeChanges) {
+ VideoStream config1 = DefaultConfig();
+ VideoStream config2 = config1;
+ config2.scalability_mode = ScalabilityMode::kL1T2;
+ config2.num_temporal_layers = 2;
+
+ RunTest({config1, config2}, /*expected_num_init_encode=*/2);
+}
+
+// Simple test that just creates and then immediately destroys an encoder.
+// The purpose of the test is to make sure that nothing bad happens if the
+// initialization step on the encoder queue doesn't run.
+TEST(VideoStreamEncoderSimpleTest, CreateDestroy) {
+ class SuperLazyTaskQueue : public webrtc::TaskQueueBase {
+ public:
+ SuperLazyTaskQueue() = default;
+ ~SuperLazyTaskQueue() override = default;
+
+ private:
+ void Delete() override { delete this; }
+ void PostTask(absl::AnyInvocable<void() &&> task) override {
+ // Intentionally drop the task; this queue never runs anything.
+ }
+ void PostDelayedTask(absl::AnyInvocable<void() &&> task,
+ TimeDelta delay) override {
+ ADD_FAILURE();
+ }
+ void PostDelayedHighPrecisionTask(absl::AnyInvocable<void() &&> task,
+ TimeDelta delay) override {
+ ADD_FAILURE();
+ }
+ };
+
+ // Lots of boilerplate.
+ test::ScopedKeyValueConfig field_trials;
+ GlobalSimulatedTimeController time_controller(Timestamp::Zero());
+ auto stats_proxy = std::make_unique<MockableSendStatisticsProxy>(
+ time_controller.GetClock(), VideoSendStream::Config(nullptr),
+ webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo, field_trials);
+ SimpleVideoStreamEncoderFactory::MockFakeEncoder mock_fake_encoder(
+ time_controller.GetClock());
+ test::VideoEncoderProxyFactory encoder_factory(&mock_fake_encoder);
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory =
+ CreateBuiltinVideoBitrateAllocatorFactory();
+ VideoStreamEncoderSettings encoder_settings{
+ VideoEncoder::Capabilities(/*loss_notification=*/false)};
+ encoder_settings.encoder_factory = &encoder_factory;
+ encoder_settings.bitrate_allocator_factory = bitrate_allocator_factory.get();
+
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ EXPECT_CALL((*adapter.get()), Initialize).WillOnce(Return());
+
+ std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter>
+ encoder_queue(new SuperLazyTaskQueue());
+
+ // Construct a VideoStreamEncoder instance and let it go out of scope without
+ // doing anything else (including calling Stop()). This should be fine since
+ // the posted init task will simply be deleted.
+ auto encoder = std::make_unique<VideoStreamEncoder>(
+ time_controller.GetClock(), 1, stats_proxy.get(), encoder_settings,
+ std::make_unique<CpuOveruseDetectorProxy>(stats_proxy.get(),
+ field_trials),
+ std::move(adapter), std::move(encoder_queue),
+ VideoStreamEncoder::BitrateAllocationCallbackType::
+ kVideoBitrateAllocation,
+ field_trials);
+
+ // Stop the encoder explicitly. This additional step checks that we do not
+ // hang when calling Stop() after the task queue has been stopped and/or is
+ // no longer accepting tasks.
+ encoder->Stop();
+}
+
+TEST(VideoStreamEncoderFrameCadenceTest, ActivatesFrameCadenceOnContentType) {
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ auto* adapter_ptr = adapter.get();
+ SimpleVideoStreamEncoderFactory factory;
+ FrameCadenceAdapterInterface::Callback* video_stream_encoder_callback =
+ nullptr;
+ EXPECT_CALL(*adapter_ptr, Initialize)
+ .WillOnce(Invoke([&video_stream_encoder_callback](
+ FrameCadenceAdapterInterface::Callback* callback) {
+ video_stream_encoder_callback = callback;
+ }));
+ TaskQueueBase* encoder_queue = nullptr;
+ auto video_stream_encoder =
+ factory.Create(std::move(adapter), &encoder_queue);
+
+ // First, expect a call made before the frame size is known and the number
+ // of simulcast layers therefore cannot be computed.
+ EXPECT_CALL(*adapter_ptr, SetZeroHertzModeEnabled(Optional(Field(
+ &FrameCadenceAdapterInterface::
+ ZeroHertzModeParams::num_simulcast_layers,
+ Eq(0u)))));
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &config);
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ video_stream_encoder->ConfigureEncoder(std::move(config), 0);
+ factory.DepleteTaskQueues();
+
+ // Then, expect a call once a frame has passed through and the number of
+ // simulcast layers has been computed.
+ EXPECT_CALL(*adapter_ptr, SetZeroHertzModeEnabled(Optional(Field(
+ &FrameCadenceAdapterInterface::
+ ZeroHertzModeParams::num_simulcast_layers,
+ Gt(0u)))));
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/1);
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+
+ // Expect a disabled zero-hertz mode after passing realtime video.
+ EXPECT_CALL(*adapter_ptr, SetZeroHertzModeEnabled(Eq(absl::nullopt)));
+ VideoEncoderConfig config2;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &config2);
+ config2.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ video_stream_encoder->ConfigureEncoder(std::move(config2), 0);
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/2);
+ factory.DepleteTaskQueues();
+}
+
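+// A sketch of the activation rule exercised by the test above, assuming the
+// content type is the only trigger: kScreen enables zero-hertz mode (with
+// num_simulcast_layers filled in once a frame has been seen), while
+// kRealtimeVideo disables it again.
+//
+//   absl::optional<FrameCadenceAdapterInterface::ZeroHertzModeParams> params;
+//   if (content_type == VideoEncoderConfig::ContentType::kScreen)
+//     params.emplace();  // num_simulcast_layers is set after the first frame.
+//   adapter->SetZeroHertzModeEnabled(params);
+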
+TEST(VideoStreamEncoderFrameCadenceTest,
+ ForwardsFramesIntoFrameCadenceAdapter) {
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ auto* adapter_ptr = adapter.get();
+ test::FrameForwarder video_source;
+ SimpleVideoStreamEncoderFactory factory;
+ auto video_stream_encoder = factory.Create(std::move(adapter));
+ video_stream_encoder->SetSource(
+ &video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ EXPECT_CALL(*adapter_ptr, OnFrame);
+ auto buffer = rtc::make_ref_counted<NV12Buffer>(/*width=*/16, /*height=*/16);
+ video_source.IncomingCapturedFrame(
+ VideoFrame::Builder().set_video_frame_buffer(std::move(buffer)).build());
+}
+
+TEST(VideoStreamEncoderFrameCadenceTest, UsesFrameCadenceAdapterForFrameRate) {
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ auto* adapter_ptr = adapter.get();
+ test::FrameForwarder video_source;
+ SimpleVideoStreamEncoderFactory factory;
+ FrameCadenceAdapterInterface::Callback* video_stream_encoder_callback =
+ nullptr;
+ EXPECT_CALL(*adapter_ptr, Initialize)
+ .WillOnce(Invoke([&video_stream_encoder_callback](
+ FrameCadenceAdapterInterface::Callback* callback) {
+ video_stream_encoder_callback = callback;
+ }));
+ TaskQueueBase* encoder_queue = nullptr;
+ auto video_stream_encoder =
+ factory.Create(std::move(adapter), &encoder_queue);
+
+ // This is just to make the VSE operational. We'll feed a frame directly
+ // through the callback interface.
+ video_stream_encoder->SetSource(
+ &video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecGeneric, 1, &video_encoder_config);
+ video_stream_encoder->ConfigureEncoder(std::move(video_encoder_config),
+ /*max_data_payload_length=*/1000);
+
+ EXPECT_CALL(*adapter_ptr, GetInputFrameRateFps);
+ EXPECT_CALL(*adapter_ptr, UpdateFrameRate);
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/1);
+ factory.DepleteTaskQueues();
+}
+
+TEST(VideoStreamEncoderFrameCadenceTest,
+ DeactivatesActivatesLayersOnBitrateChanges) {
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ auto* adapter_ptr = adapter.get();
+ SimpleVideoStreamEncoderFactory factory;
+ FrameCadenceAdapterInterface::Callback* video_stream_encoder_callback =
+ nullptr;
+ EXPECT_CALL(*adapter_ptr, Initialize)
+ .WillOnce(Invoke([&video_stream_encoder_callback](
+ FrameCadenceAdapterInterface::Callback* callback) {
+ video_stream_encoder_callback = callback;
+ }));
+ TaskQueueBase* encoder_queue = nullptr;
+ auto video_stream_encoder =
+ factory.Create(std::move(adapter), &encoder_queue);
+
+ // Configure 2 simulcast layers. FillEncoderConfiguration sets min bitrates to
+ // {150000, 450000}.
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 2, &video_encoder_config);
+ video_stream_encoder->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ // Ensure an encoder is created.
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/1);
+
+ // Both layers enabled at 1 Mbps.
+ video_stream_encoder->OnBitrateUpdated(
+ DataRate::KilobitsPerSec(1000), DataRate::KilobitsPerSec(1000),
+ DataRate::KilobitsPerSec(1000), 0, 0, 0);
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(0, /*enabled=*/true));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(1, /*enabled=*/true));
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+
+ // Layer 1 disabled at 200 kbps.
+ video_stream_encoder->OnBitrateUpdated(
+ DataRate::KilobitsPerSec(200), DataRate::KilobitsPerSec(200),
+ DataRate::KilobitsPerSec(200), 0, 0, 0);
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(0, /*enabled=*/true));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(1, /*enabled=*/false));
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+
+ // All layers disabled when video is suspended.
+ video_stream_encoder->OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(),
+ DataRate::Zero(), 0, 0, 0);
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(0, /*enabled=*/false));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(1, /*enabled=*/false));
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+
+ // Both layers enabled again at 1 Mbps.
+ video_stream_encoder->OnBitrateUpdated(
+ DataRate::KilobitsPerSec(1000), DataRate::KilobitsPerSec(1000),
+ DataRate::KilobitsPerSec(1000), 0, 0, 0);
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(0, /*enabled=*/true));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerStatus(1, /*enabled=*/true));
+ factory.DepleteTaskQueues();
+}
+
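+// The bitrate thresholds above follow from the min bitrates {150000, 450000}
+// set by FillEncoderConfiguration: 200 kbps clears layer 0's 150 kbps floor
+// but not the 150 + 450 kbps needed for both layers, 1000 kbps clears both,
+// and zero suspends everything.
+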
+TEST(VideoStreamEncoderFrameCadenceTest, UpdatesQualityConvergence) {
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ auto* adapter_ptr = adapter.get();
+ SimpleVideoStreamEncoderFactory factory;
+ FrameCadenceAdapterInterface::Callback* video_stream_encoder_callback =
+ nullptr;
+ EXPECT_CALL(*adapter_ptr, Initialize)
+ .WillOnce(Invoke([&video_stream_encoder_callback](
+ FrameCadenceAdapterInterface::Callback* callback) {
+ video_stream_encoder_callback = callback;
+ }));
+ TaskQueueBase* encoder_queue = nullptr;
+ auto video_stream_encoder =
+ factory.Create(std::move(adapter), &encoder_queue);
+
+ // Configure 2 simulcast layers and setup 1 MBit/s to unpause the encoder.
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 2, &video_encoder_config);
+ video_stream_encoder->ConfigureEncoder(video_encoder_config.Copy(),
+ kMaxPayloadLength);
+ video_stream_encoder->OnBitrateUpdated(
+ DataRate::KilobitsPerSec(1000), DataRate::KilobitsPerSec(1000),
+ DataRate::KilobitsPerSec(1000), 0, 0, 0);
+
+ // Pass a frame which has unconverged results.
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/1);
+ EXPECT_CALL(factory.GetMockFakeEncoder(), EncodeHook)
+ .WillRepeatedly(Invoke([](EncodedImage& encoded_image,
+ rtc::scoped_refptr<EncodedImageBuffer> buffer) {
+ encoded_image.qp_ = kVp8SteadyStateQpThreshold + 1;
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = kVideoCodecVP8;
+ return codec_specific;
+ }));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerQualityConvergence(0, false));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerQualityConvergence(1, false));
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+ Mock::VerifyAndClearExpectations(&factory.GetMockFakeEncoder());
+
+ // Pass a frame which converges in layer 0 and not in layer 1.
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/2);
+ EXPECT_CALL(factory.GetMockFakeEncoder(), EncodeHook)
+ .WillRepeatedly(Invoke([](EncodedImage& encoded_image,
+ rtc::scoped_refptr<EncodedImageBuffer> buffer) {
+ // This sets spatial index 0 content to be at target quality, while
+ // index 1 content is not.
+ encoded_image.qp_ = kVp8SteadyStateQpThreshold +
+ (encoded_image.SpatialIndex() == 0 ? 0 : 1);
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = kVideoCodecVP8;
+ return codec_specific;
+ }));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerQualityConvergence(0, true));
+ EXPECT_CALL(*adapter_ptr, UpdateLayerQualityConvergence(1, false));
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+ Mock::VerifyAndClearExpectations(&factory.GetMockFakeEncoder());
+}
+
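+// Convergence in the test above reduces to a QP comparison that the
+// EncodeHook fakes per spatial index. Roughly, and assuming QP is the only
+// input to the decision:
+//
+//   bool converged = encoded_image.qp_ <= kVp8SteadyStateQpThreshold;
+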
+TEST(VideoStreamEncoderFrameCadenceTest,
+ RequestsRefreshFramesWhenCadenceAdapterInstructs) {
+ auto adapter = std::make_unique<MockFrameCadenceAdapter>();
+ auto* adapter_ptr = adapter.get();
+ MockVideoSourceInterface mock_source;
+ SimpleVideoStreamEncoderFactory factory;
+ FrameCadenceAdapterInterface::Callback* video_stream_encoder_callback =
+ nullptr;
+ EXPECT_CALL(*adapter_ptr, Initialize)
+ .WillOnce(Invoke([&video_stream_encoder_callback](
+ FrameCadenceAdapterInterface::Callback* callback) {
+ video_stream_encoder_callback = callback;
+ }));
+ TaskQueueBase* encoder_queue = nullptr;
+ auto video_stream_encoder =
+ factory.Create(std::move(adapter), &encoder_queue);
+ video_stream_encoder->SetSource(
+ &mock_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &config);
+ video_stream_encoder->ConfigureEncoder(std::move(config), 0);
+ PassAFrame(encoder_queue, video_stream_encoder_callback, /*ntp_time_ms=*/2);
+ // Ensure the encoder is set up.
+ factory.DepleteTaskQueues();
+
+ EXPECT_CALL(*adapter_ptr, ProcessKeyFrameRequest)
+ .WillOnce(Invoke([video_stream_encoder_callback] {
+ video_stream_encoder_callback->RequestRefreshFrame();
+ }));
+ EXPECT_CALL(mock_source, RequestRefreshFrame);
+ video_stream_encoder->SendKeyFrame();
+ factory.DepleteTaskQueues();
+ Mock::VerifyAndClearExpectations(adapter_ptr);
+ Mock::VerifyAndClearExpectations(&mock_source);
+
+ EXPECT_CALL(*adapter_ptr, ProcessKeyFrameRequest);
+ EXPECT_CALL(mock_source, RequestRefreshFrame).Times(0);
+ video_stream_encoder->SendKeyFrame();
+ factory.DepleteTaskQueues();
+}
+
+TEST(VideoStreamEncoderFrameCadenceTest,
+ RequestsRefreshFrameForEarlyZeroHertzKeyFrameRequest) {
+ SimpleVideoStreamEncoderFactory factory;
+ auto encoder_queue =
+ factory.GetTimeController()->GetTaskQueueFactory()->CreateTaskQueue(
+ "EncoderQueue", TaskQueueFactory::Priority::NORMAL);
+
+ // Enables zero-hertz mode.
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-ZeroHertzScreenshare/Enabled/");
+ auto adapter = FrameCadenceAdapterInterface::Create(
+ factory.GetTimeController()->GetClock(), encoder_queue.get(),
+ field_trials);
+ FrameCadenceAdapterInterface* adapter_ptr = adapter.get();
+
+ MockVideoSourceInterface mock_source;
+ auto video_stream_encoder = factory.CreateWithEncoderQueue(
+ std::move(adapter), std::move(encoder_queue), &field_trials);
+
+ video_stream_encoder->SetSource(
+ &mock_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &config);
+ video_stream_encoder->ConfigureEncoder(std::move(config), 0);
+
+ // Eventually expect a refresh frame request when requesting a key frame
+ // before initializing zero-hertz mode. This can happen in reality because the
+ // threads invoking key frame requests and constraints setup aren't
+ // synchronized.
+ EXPECT_CALL(mock_source, RequestRefreshFrame);
+ video_stream_encoder->SendKeyFrame();
+ constexpr int kMaxFps = 30;
+ adapter_ptr->OnConstraintsChanged(VideoTrackSourceConstraints{0, kMaxFps});
+ factory.GetTimeController()->AdvanceTime(
+ TimeDelta::Seconds(1) *
+ FrameCadenceAdapterInterface::kOnDiscardedFrameRefreshFramePeriod /
+ kMaxFps);
+}
+
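+// The final advance is sized so that kOnDiscardedFrameRefreshFramePeriod
+// discarded-frame intervals elapse at kMaxFps = 30, i.e. the wait is
+// (kOnDiscardedFrameRefreshFramePeriod / 30) seconds, after which the
+// cadence adapter is expected to have asked the source for a refresh frame.
+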
+} // namespace webrtc